// open-source-search-engine/PageOverview.cpp

#include "gb-include.h"
#include "TcpSocket.h"
#include "HttpRequest.h"
#include "Pages.h"
#include "Spider.h" // MAX_SPIDERS
#include "Users.h"
bool sendPageOverview ( TcpSocket *s , HttpRequest *r ) {
char buf [ 256*1024 ];
char *p = buf;
char *pend = buf + 256*1024;
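// NOTE: printAdminTop() is the only call below that receives pend;
// everything after it uses unbounded sprintf, so buf must stay large
// enough to hold the entire generated page.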
// . print standard header
// . do not print big links if only an assassin, just print host ids
p = g_pages.printAdminTop ( p , pend , s , r );
//long user = g_pages.getUserType ( s , r );
//sprintf ( p ,
//"<html> \n"
//"<title>Gigablast Admin Overview</title>\n"
//"\n"
//"<body text=#000000 bgcolor=#ffffff link=#000000 vlink=#000000 alink=#000000><style><!--body,td,a,p,.h{font-family:arial,sans-serif; font-size: 15px;}//--></style>\n"
//"\n"
// );
// p += gbstrlen ( p );
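// NOTE: the sprintf/gbstrlen pattern below formats each section
// unconditionally but only advances the write pointer, p, when the
// requester has PAGE_MASTER permission; otherwise the next sprintf
// overwrites the section, so admin-only text never reaches the page.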
sprintf ( p ,
//"<center><b><font color=red size=+3>Confidential</font></b></center>\n"
"<center><b><font color=red>This Document is Confidential</font></b></center>\n"
"<br>"
"\n"
"\n"
);
//if ( user == USER_MASTER ) p += gbstrlen ( p );
if ( g_users.hasPermission( r, PAGE_MASTER ) ) p += gbstrlen(p);
sprintf ( p ,
//"<table><tr><td valign=bottom><a href=/><img width=210 height=25 border=0 src=/logo2.gif></a>&nbsp;&nbsp;</font></td><td><font size=+1><b>Admin Overview</td></tr></table>"
//"\n"
//"\n"
//"<br><br>\n"
//"\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Table of Contents\n"
"</td></tr></table>\n"
"\n"
"<br>\n"
"<b><a href=#input>Input Parameters</a></b> - parameters you can pass to Gigablast for search results<br><br>\n"
"<b><a href=#exact>Exact Hit Counts</a></b> - how to get the number of search results exactly<br><br>"
"<b><a href=#topics>Related Topics</a></b> - the input parameters for getting related topics<br><br>"
"<b><a href=#output>The XML Feed</a></b> - the format of the XML reply which contains the search results<br><br>\n"
"<b><a href=#errors>Error Codes</a></b> - how to interpret Gigablast errors<br><br>\n"
"<b><a href=#weighting>Weighting Query Terms</a></b> - how to pass in your own query term weights<br><br>"
);
p += gbstrlen ( p );
sprintf ( p ,
"<hr>"
"<b><a href=#requirements>Hardware Requirements</a></b> - what is required to run gigablast<br><br>"
"<b><a href=#perf>Performance Specifications</a></b> - various statistics.<br><br>"
"<b><a href=#install>Installation & Configuration</a></b> - the necessary files to run Gigablast<br><br>"
"<b><a href=#cmdline>Command Line Options</a></b> - various command line options (coming soon)<br><br>"
"<b><a href=#clustermaint>Cluster Maintenance</a></b> - running Gigablast on a cluster of computers.<br><br>"
"<b><a href=#trouble>Troubleshooting</a></b> - how to fix problems<br><br>"
"<b><a href=#disaster>Disaster Recovery</a></b> - dealing with a crashed "
"host<br><br>"
);
//if ( user == USER_MASTER ) p += gbstrlen ( p );
if ( g_users.hasPermission( r, PAGE_MASTER ) ) p += gbstrlen(p);
sprintf ( p ,
"<b><a href=#security>The Security System</a></b> - how to control access<br><br>"
"<hr>"
"<b><a href=#build>Building an Index</a></b> - how to start building your index<br><br>\n"
"<b><a href=#spider>The Spider</a></b> - all about Gigabot, Gigablast's crawling agent<br><br>\n"
"<b><a href=#quotas>Document Quotas</a></b> - how to limit documents into the index<br><br>\n"
"<b><a href=#injecting>Injecting Documents</a></b> - inserting documents directly into Gigablast<br><br>"
"<b><a href=#deleting>Deleting Documents</a></b> - removing documents from the index<br><br>"
);
p += gbstrlen ( p );
sprintf ( p ,
"<b><a href=#scoring>Scoring a Document</a></b> - how Gigablast scores a document and how to control it<br><br>"
"<b><a href=#metas>Indexing User-Defined Meta Tags</a></b> - how Gigablast indexes user-defined meta tags<br><br>"
"<b><a href=#bigdocs>Indexing Big Documents</a></b> - what controls the maximum size of a document that can be indexed?<br><br>"
"<b><a href=#langs>Indexing Different Languages</a></b> - how Gigablast indexes different languages<br><br>"
"<b><a href=#rolling>Rolling the New Index</a></b> - merging the realtime files into the base file<br><br>"
"<b><a href=#catdb>Building a DMOZ Based Directory</a></b> - using catdb to build a web directory based on open DMOZ data<br><br>"
"<hr>"
"<b><a href=#optimizing>Optimizing</a></b> - optimizing Gigablast's spider and query performance<br><br>\n"
"<b><a href=#logs>The Log System</a></b> - how Gigablast logs information<br><br>"
"<b><a href=#config>gb.conf</a></b> - describes the gb configuration file<br><br>"
"<b><a href=#hosts>Hosts.conf</a></b> - the file that describes all participating hosts in the network<br><br>\n"
//"<b><a href=#ruleset>Rulesets</a></b> - a ruleset tells Gigablast how to parse, score and spider a document<br><br>\n"
"<b><a href=#stopwords>Stopwords</a></b> - list of common words generally ignored at query time<br><br>\n"
"<b><a href=#phrasebreaks>Phrase Breaks</a></b> - list of punctuation that breaks a phrase<br><br>\n"
"\n"
"<br>\n"
"\n"
);
//if ( user == USER_MASTER ) p += gbstrlen ( p );
if ( g_users.hasPermission( r, PAGE_MASTER ) ) p += gbstrlen(p);
p += sprintf ( p ,
"<a name=input></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>The Input Parameters\n"
"</td></tr></table>\n"
"<br><br>\n"
"To get search results from Gigablast use a url like: <b><a href=\"/search?q=test&sc=0&dr=0&raw=8&topics=20+100\">http://www.gigablast.com/search?q=test&sc=0&dr=0&raw=8&topics=20+100</a></b> &nbsp; where:<br>"
""
"<br>"
"<table cellpadding=4>\n"
"\n"
"<tr><td bgcolor=#eeeeee>n=X</b></td>"
"<td bgcolor=#eeeeee>returns X search results. Default is 10. Max is 50.</td></tr>"
"\n"
"<tr><td>s=X</b></td>\n"
"<td>returns results starting at result #X. The first result is result #0. Default is 0. Max is 499.</td></tr>\n"
"\n"
"<tr><td bgcolor=#eeeeee>ns=X</b></td>"
"<td bgcolor=#eeeeee>returns X <b>summary excerpts</b> in the summary of each search result. Default is defined on a per collection basis in the <a href=\"/admin\">Display Controls</a>.</td></tr>"
"\n"
"<tr><td>site=X</b></td>\n"
"<td>returned results will have URLs from the site, X.</td></tr>\n"
"\n"
"<tr><td bgcolor=#eeeeee>plus=X</b></td>"
"<td bgcolor=#eeeeee>returned results will have all words in X. Like a default AND.</td></tr>"
"\n"
"<tr><td>minus=X</b></td>\n"
"<td>returned results will not have any words in X.</td></tr>\n"
"\n"
"<tr><td bgcolor=#eeeeee>rat=1</b></td>"
"<td bgcolor=#eeeeee>returned results will have ALL query terms. This is also known as a <i>default and</i> search. <i>rat</i> means Require All Terms. </td></tr>"
"\n"
"<tr><td>sc=X</b></td>\n"
"<td>X can be 0 or 1 to respectively disable or enable <a href=#siteclustering><b>site clustering</b></a>. Default is 1, but 0 if the <i>raw</i> parameter is used.</td></tr>\n"
"\n"
"<tr><td bgcolor=#eeeeee>dr=X</b></td>"
"<td bgcolor=#eeeeee>X can be 0 or 1 to respectively disable or enable <a href=#dupremoval><b>duplicate result removal</b></a>. Default is 1, but 0 if the <i>raw</i> parameter is used.</td></tr>"
"\n"
"<tr><td>raw=X</b></td>\n"
"<td>X ranges from 0 to 8 to specify the format of the search results. raw=8 requests the <b>XML feed</b>.</td></tr>\n"
"\n"
"<tr><td>raw=2</b></td>"
"<td>Just display a list of docids between &lt;pre&gt; tags. Will display one extra docid than requested if possible, so you know if you have more docids available or not. Does not have to generate summaries so it is a bit faster, especially if you do not perform <a href=\"#siteclustering\">site clustering</a> or <a href=\"#dupremoval\">dup removal</a>.</td></tr>"
""
"<tr><td bgcolor=#eeeeee>qh=X</b></td>"
"<td bgcolor=#eeeeee>X can be 0 or 1 to respectively disable or enable <b>highlighting</b> of query terms in the titles and summaries. Default is 1, but 0 if the <i>raw</i> parameter is used.</td></tr>"
"\n"
"<tr><td>usecache=X</b></td>\n"
"<td>X can be 0 or 1 to respectively disable or enable <b>caching</b> of the search results pages. Default is 1.</td></tr>\n"
"\n"
"<tr><td bgcolor=#eeeeee>rcache=X</b></td>"
"<td bgcolor=#eeeeee>X can be 0 or 1 to respectively disable or enable reading from the search results page cache. Default is 1.</td></tr>"
"\n"
"<tr><td>wcache=X</b></td>\n"
"<td>X can be 0 or 1 to respectively disable or enable writing to the search results page cache. Default is 1.</td></tr>\n"
"\n"
"<tr><td bgcolor=#eeeeee>bq=X</b></td>"
"<td bgcolor=#eeeeee>X can be 0 or 1 or 2. 0 means the query is NOT boolean, 1 means the query is boolean and 2 means to auto-detect. Default is 2.</td></tr>"
"\n"
"<a name=rt></a>\n"
"<tr><td>rt=X</b></td>\n"
"<td>X can be 0 or 1 to respectively disable or enable <b>real time searches</b>. If enabled, query response time will suffer because Gigablast will have to read from multiple files, usually 3 or 4, of varying ages, to satisfy a query. Default value of rt is 1, but 0 if the <i>raw</i> paramter is used.</td></tr>\n"
"\n"
"<a name=dt></a>\n"
"<tr><td bgcolor=#eeeeee>dt=X</b></td>"
"<td bgcolor=#eeeeee>X is a space-separated string of <b>meta tag names</b>. Do not forget to url-encode the spaces to +'s or %%20's. Gigablast will extract the contents of these specified meta tags out of the pages listed in the search results and display that content after each summary. i.e. <i>&dt=description</i> will display the meta description of each search result. <i>&dt=description:32+keywords:64</i> will display the meta description and meta keywords of each search result and limit the fields to 32 and 64 characters respectively. When receiving the XML feed from gigablast, the <i>&lt;display name=\"meta_tag_name\"&gt;meta_tag_content&lt;/display&gt;</i> XML tag will be used to convey each requested meta tag's content.</td></tr>\n"
"\n"
"<a name=spell></a>\n"
"<tr><td>spell=X</b></td>\n"
"<td>X can be 0 or 1 to respectively disable or enable <b>spell checking</b>. If enabled while using the XML feed, when Gigablast finds a spelling recommendation it will be included in the XML <spell> tag. Default is 0 if using an XML feed, 1 otherwise.</td></tr>\n"
"\n"
""
"<tr><td align=top bgcolor=#eeeeee>topics=NUM+MAX+SCAN+<br>MIN+MAXW+META+<br>DEL+IDF+DEDUP</b></td>\n"
"\n"
"<td bgcolor=#eeeeee>\n"
"<a name=topics></a>\n"
"<b>NUM</b> is how many <b>related topics</b> you want returned. \n"
"<br><br>\n"
"<b>MAX</b> is the maximum number of topics to generate and store in cache, so if TW is increased, but still below MT, it will result in a fast cache hit.\n"
"<br><br>\n"
"<b>SCAN</b> is how many documents to scan for related topics. If this is 30, for example, then Gigablast will scan the first 30 search results for related topics.\n"
"<br><br>\n"
"<b>MIN</b> is the minimum score of returned topics. Ranges from 0%% to over 100%%. 50%% is considered pretty good. BUG: This must be at least 1 to get any topics back.\n"
"<br><br>\n"
"<b>MAXW</b> is the maximum number of words per topic.\n"
"<br><br>\n"
"<b>META</b> is the meta tag name to which Gigablast will restrict the content used to generate the topics. Do not specify thie field to restrict the content to the body of each document, that is the default.\n"
"<br><br>\n"
"\n"
"<b>DEL</b> is a single character delimeter which defines the topic candidates. All candidates must be separated from the other candidates with the delimeter. So &lt;meta name=test content=\" cat dog ; pig rabbit horse\"&gt; when using the ; as a delimeter would only have two topic candidates: \"cat dog\" and \"pig rabbit horse\". If no delimeter is provided, default funcationality is assumed.\n"
"<br><br>\n"
""
"<b>IDF</b> is 1, the default, if you want Gigablast to weight topic candidates by their idf, 0 otherwise."
"<br><br>\n"
""
"<b>DEDUP</b> is 1, the default, if the topics should be deduped. This involves removing topics that are substrings or superstrings of other higher-scoring topics."
"<br><br>\n"
""
""
"Example: topics=49+100+30+1+6+author+%%3B+0+0"
"<br><br>\n"
"The default values for those parameters with unspecifed defaults can be defined on the \"Search Controls\" page. "
"<br><br>\n"
""
"XML feeds will contain the generated topics like: &lt;topic&gt;&lt;name&gt;&lt;![CDATA[some topic]]&gt;&lt;/name&gt;&lt;score&gt;13&lt;/score&gt;&lt;from&gt;metaTagName&lt;/from&gt;&lt;/topic&gt;"
"<br><br>\n"
"Even though somewhat nonstandard, you can specify multiple <i>&amp;topic=</i> parameters to get back multiple topic groups."
"<br><br>\n"
"Performance will decrease if you increase the MAX, SCAN or MAXW."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td align=top>rdc=X</b></td>\n"
"<td>\n"
"<a name=rdc></a>\n"
"X is 1 if you want Gigablast to return the number of documents that "
"contained each topic."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td align=top bgcolor=#eeeeee>rd=X</b></td>\n"
"<td bgcolor=#eeeeee>\n"
"<a name=rd></a>\n"
"X is 1 if you want Gigablast to return the list of docIds that "
"contained each topic."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td align=top>rp=X</b></td>\n"
"<td>\n"
"<a name=rd></a>\n"
"X is 1 if you want Gigablast to return the popularity of each topic."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td align=top bgcolor=#eeeeee>mdc=X</b></td>\n"
"<td bgcolor=#eeeeee>\n"
"<a name=rd></a>\n"
"Gigablast will not display topics that are not contained in at least X "
"documents. The default is configurable in the Search Controls page on a per "
"collection basis."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td align=top>t0=X</b></td>\n"
"<td>\n"
"<a name=exact></a>\n"
"Gigablast will use at least X docids from each termlist. Used to get more accurate hit counts."
"<br><br>\n"
"For performance reasons, most large search engines nowadays only return a rough estimate of the number of search results, but you may desire to get a better approximation or even an exact count. Gigablast allows you to do this, but it may be at the expense of query resonse time."
"<br><br>\n"
"By using the <b>t0</b> variable you can tell Gigablast to use a minimum number of docids from each termlist. Typically, <b>t0</b> defaults to something of around 10,000 docids. Often more docids than that are used, but this is just the minimum. So if Gigablast is forced to use more docids it will take longer to compute the search results on average, but it will give you a more precise hit count. By setting <b>t0</b> to the truncation limit or higher you will max out the hit count precision."
"<br><br>\n"
"Example: <b><a href=\"/search?q=test&t0=5000000\">http://www.gigablast.com/search?q=test&t0=5000000</a></b>\n"
""
"</table>\n"
""
""
"<br><br>\n"
"<b><a name=siteclustering>Site Clustering</a></b> \n"
"<br><br>\n"
"It is often undesirable to have many results listed from the same site. Site Clustering will essentially limit the number returned results from any given site to two, but it will provide a link which says \"more results from this site\" in case the searcher wishes it.\n"
"<br><br>\n"
"<b><a name=dupremoval>Duplicate Results Removal</a></b> \n"
"<br><br>\n"
"When dup results removal is enabled Gigablast will remove results that have the same content as other results. Right now the comparison is very strict, but will be somewhat relaxed in the future.\n"
""
"<br><br>\n"
"<b><a name=dupremoval>Cached Web Page Parameters</a></b> "
"<br><br>\n"
"To get a cached web page from Gigablast use a url like: <b>http://www.gigablast.com/get?d=12345&ih=1&q=my+query</b> &nbsp; where: <br>\n"
""
"<br>\n"
"<table cellpadding=4>\n"
""
"<tr><td bgcolor=#eeeeee>d=X</b></td>\n"
"<td bgcolor=#eeeeee>X is the docId of the page you want returned. DocIds are 64-bit, so you'll need 8 bytes to hold one.</td></tr>\n"
""
"<tr><td>ih=X</b></td>\n"
"<td>X is 1 to include the Gigablast header in the returned page, and 0 to exclude it.</td></tr>\n"
""
"<tr><td bgcolor=#eeeeee>ibh=X</b></td>\n"
"<td bgcolor=#eeeeee>X is 1 to include the Gigablast BASE HREF tag in the cached page. The default is 1.</td></tr>\n"
""
"<tr><td>q=X</b></td>\n"
"<td>X is the the query that, when present, will cause Gigablast to highlight the query terms on the returned page.</td></tr>\n"
""
"<tr><td bgcolor=#eeeeee>cas=X</b></td>\n"
"<td bgcolor=#eeeeee>"
"X can be 0 or 1 to respectively disable or enable click and scroll. Default is 1.</td></tr>\n"
""
"<tr><td>strip=X</b></td>\n"
"<td>"
"X can be 0, 1 or 2. If X is 0 then no stripping is performed. If X is 1 then image and other tags are removed. An X of 2 is another form of removing tags. Default is 0.</td></tr>\n"
""
"</table>\n"
""
""
"<br><br>\n"
"\n"
""
""
""
"<a name=output></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>The XML Feed\n"
"</td></tr></table>\n"
"<br><br>\n"
"Gigablast allows you to receive the search results in a number of formats useful for interfacing to your program. By specify a \"raw=8\" as a cgi parameter you can receive the results in XML. Here is an <b><a href=/example.xml>example</a></b> of the raw=8 feed.\n"
"Additionally, raw=9 may be used to obtain the feed encoded in UTF-8.\n"
"<br><br>\n"
"The XML reply has the following format (but without the comments):\n"
"<br><br>\n"
"<pre>\n"
"# The XML reply uses the Latin-1 Character Set (ISO 8859-1) when using raw=8\n"
"<b>&lt;?xml version=\"1.0\" encoding=\"ISO-8859-1\" ?&gt;</b>\n"
"# OR when using raw=9\n"
"<b>&lt;?xml version=\"1.0\" encoding=\"utf-8\" ?&gt;</b>\n"
"\n"
"# It consists of one, and only one, response.\n"
"<b>&lt;response&gt;</b>\n"
"\n"
" # If any error was received in processing the request, it will be here.\n"
" <b>&lt;error&gt;Out of memory&lt/error&gt;</b>\n"
" # The numeric code of the error, if any, goes here.\n"
" # See all the <a href=#errors>Error Codes</a>, but the "
" # following errors are most likely:\n"
" # %5li - A cached page was not found when it should have been.\n"
" # %5li - There was a shortage of memory to properly process the request.\n"
" # %5li - Queried collection does not exist.\n",
(long)ENOTFOUND,
(long)ENOMEM,
(long)ENOCOLLREC);
sprintf( p ,
" <b>&lt;errno&gt;32790&lt/errno&gt;</b>\n"
" # Total number of documents in the collection being searched.\n"
" <b>&lt;docsInCollection&gt;2060245584&lt;/docsInCollection&gt;</b>\n"
" # An APPROXIMATION of the total number of search results for the query.\n"
" <b>&lt;hits&gt;4838158&lt;/hits&gt;</b>\n"
" # This is \"1\" if more results are available after these, \"0\" if not.\n"
" <b>&lt;moreResultsFollow&gt;1&lt;/moreResultsFollow&gt;</b>\n"
" # If present and value is 1, some words in the query were censored for content.\n"
" <b>&lt;queryCensored&gt;1&lt;/queryCensored&gt;</b>\n"
" # If present, the value is the number of results that were censored for content.\n"
" <b>&lt;resultsCensored&gt;3&lt;/resultsCensored&gt;</b>\n"
" # If this tag is present, it will hold an alternate spelling recommendation \n"
" # for the query. The &spell=1 parameter must be present in the query url,\n"
" # however, for you to get a spelling recommendation back.\n"
" <b>&lt;spell&gt;nose&lt;/spell&gt;</b>\n"
" # If this tag is present, it contains the list of query words that were \n"
" # ignored as individual words, but not necessarily as part of a phrase\n"
" <b>&lt;ignoredWords&gt;the in of&lt;/ignoredWords&gt;</b>\n"
" # This is how many of the search results contain ALL of the query terms.\n"
" # It is only used for printing the \"blue bar\" for doing <a href=\"/superRecall.html\">SuperRecall</a>\n"
" <b>&lt;minNumExactMatches&gt;300&lt;/minNumExactMatches&gt;</b>\n"
"\n"
" # The list of related topics, each enclosed by &lt;topic&gt; tags. \n"
" # You must provide a <i>topics</i> parameter to the query url to get "
"topics.\n"
" <b>&lt;topic&gt;</b>\n"
" # Each topic has a score. A score of 50%% or more is considered pretty good.\n"
" <b>&lt;score&gt;63&lt;/score&gt;</b>\n"
" # Out of the documents scanned, how many contain this topic.\n"
" <b>&lt;docCount&gt;4&lt;/docCount&gt;</b>\n"
" # The topic popularity. A measure of how popular the word or phrase is\n"
" # based on how many web pages contain it overall. Ranges from 0 to 1000.\n"
" # 1000 being the most popular.\n"
" <b>&lt;popularity&gt;16&lt;/popularity&gt;</b>\n"
" # The docIds of the documents scanned that contain this topic.\n"
" <b>&lt;docId&gt;9030668134&lt;/docId&gt;</b>\n"
" <b>&lt;docId&gt;265962215563&lt;/docId&gt;</b>\n"
" <b>&lt;docId&gt;43940265200&lt;/docId&gt;</b>\n"
" <b>&lt;docId&gt;264861015824&lt;/docId&gt;</b>\n"
" # The topic name.\n"
" <b>&lt;name&gt;&lt;![CDATA[Race Cars]]&gt;&lt;/name&gt;</b>\n"
" # And OPTIONALLY the name of the meta tag it was derived from.\n"
" <b>&lt;from&gt;keywords&lt;/from&gt;</b>\n"
" <b>&lt;/topic&gt;</b>\n"
"\n"
" # The list of reference pages for the search results. Each reference is\n"
" # enclosed in &lt;reference&gt; tags.\n"
" <b>&lt;reference&gt;</b>\n"
" # Each reference has a score based on its relevance to the query.\n"
" <b>&lt;score&gt;93&lt;/score&gt;</b>\n"
" # Title of the reference page\n"
" <b>&lt;title&gt;<![CDATA[A Great Reference]]>&lt;/title&gt;</b>\n"
" # Url of the reference page\n"
" <b>&lt;url&gt;&lt;![CDATA[http://www.greatreference.com/]]&gt;&lt;/url&gt</b>\n"
" <b>&lt;/reference&gt;</b>\n"
"\n"
" # The list of related pages for the search results. Each related page is\n"
" # enclosed in &lt;related&gt; tags.\n"
" <b>&lt;related&gt;</b>\n"
" # Each related page has a score based on its relevance to the query.\n"
" <b>&lt;score&gt;91&lt;/score&gt;</b>\n"
" # Title of the related page.\n"
" <b>&lt;title&gt;<![CDATA[Something Similar]]>&lt;/title&gt;</b>\n"
" # Url of the related page.\n"
" <b>&lt;url&gt;&lt;![CDATA[http://www.similar.com/]]&gt;&lt;/url&gt</b>\n"
" # Summary of the related page.\n"
" <b>&lt;sum&gt;&lt;![CDATA[This page is similar to the results]]&gt;&lt;/sum&gt</b>\n"
" <b>&lt;/related&gt;</b>\n"
"\n"
" # The list of search results, each enclosed in &lt;result&gt; tags.\n"
" <b>&lt;result&gt;</b>\n"
" # Each result has a title. This may be empty if none was found on the page.\n"
" <b>&lt;title&gt;&lt;![CDATA[My Homepage]]&gt;&lt;/title&gt;</b>\n"
" # Each result has a summary. This may be empty. The summary is generated \n"
" # so as to contain the query terms if possible.\n"
" <b>&lt;sum&gt;&lt;![CDATA[All about my interests and hobbies]]&gt;&lt;/sum&gt;</b>\n"
" # If this result is categorized under the DMOZ Directory, data about each\n"
" # category it is in will be enclosed in a &lt;dmoz&gt; tag.\n"
" <b>&lt;dmoz&gt;</b>\n"
" # The category ID number of this category.\n"
" <b>&lt;dmozCatId&gt;172&lt;/dmozCatId&gt;</b>\n"
" # The path of this category in the directory.\n"
" <b>&lt;dmozCat&gt;&lt;![CDATA[Health: Dentistry]]&gt;&lt;/dmozCat&gt;</b>\n"
" # Title of this result as listed in the directory.\n"
" <b>&lt;dmozTitle&gt;&lt;![CDATA[My Homepage]]&gt;&lt;/dmozTitle&gt;</b>\n"
" # Description of this page as listed in the directory.\n"
" <b>&lt;dmozDesc&gt;&lt;![CDATA[A Dentist's Home Page]]&gt;&lt;/dmozDesc&gt;</b>\n"
" <b>&lt;/dmoz&gt;</b>\n"
" # If the directory is being given along with the results, this is the number of\n"
" # stars given to this page based on its quality.\n"
" <b>&lt;stars&gt;3&lt;/stars&gt;</b>\n"
" # Each result may have a sequence of &lt;display&gt; tags if the feed input\n"
" # contained a <a href=#input>dt</a> parameter. This allows you to extract\n"
" # information contained in meta tags in the content of each search result.\n"
" # To obtain the contents of the author meta tag, you would need to pass in\n"
" # dt=author.\n"
" <b>&lt;display name=\"author\"&gt;&lt;![CDATA[Contents of the meta author tag]]&gt;&lt/display&gt;</b>\n"
" # Each result has a URL. This should never be empty.\n"
" <b>&lt;url&gt;&lt;![CDATA[http://www.mydomain.com/mypage.html]]&gt;&lt;/url&gt;</b>\n"
" # The size of the page in kilobytes. Accurate to the tenth of a kilobyte.\n"
" <b>&lt;size&gt;5.6&lt;/size&gt;</b>\n"
" # The time the page was last INDEXED. It may not have been indexed in a \n"
" # long time if the page's content has not changed. The time is expressed \n"
" # in seconds since the epoch. (Jan 1, 1969)\n"
" <b>&lt;spidered&gt;1064367311&lt;/spidered&gt;</b> \n"
" # The time the page was last modified. This is taken from the HTTP reply \n"
" # of the web server when downloading the page. It is 0 if unknown. The time\n"
" # is expressed in seconds since the epoch. (Jan 1, 1969)\n"
" <b>&lt;lastMod&gt;1058477041&lt;/lastMod&gt;</b>\n"
" # The assigned docid for this page. This number is unique and used \n"
" # internally by Gigablast to identify this page. It is used to retrieve the\n"
" # \"cached copy\" of the page.\n"
" <b>&lt;docId&gt;65990704587&lt;/docId&gt;</b>\n"
" # When doing site clustering, this tag will be present if the result is \n"
" # from the same hostname as a previous result for the same query. It \n"
" # indicates that you might want to indent the result. Any further results \n"
" # from this same hostname will be stripped from the feed.\n"
" <b>&lt;clustered&gt;1&lt;/clustered&gt;</b>\n"
" # When Topic Clustering is being used, these will display results which \n"
" # are considered similar to this result and have been clustered under it. \n"
" # Each similar result is enclosed in a &lt;similar&gt; tag. \n"
" <b>&lt;similar&gt;</b>\n"
" # The url for the similar result.\n"
" <b>&lt;url&gt;&lt;![CDATA[http://www.similar.com/]]&gt;&lt;/url&gt;</b>\n"
" # The title of the similar result.\n"
" <b>&lt;title&gt;&lt;![CDATA[A similar topic]]&gt;&lt;/title&gt;</b>\n"
" <b>&lt;/similar&gt;</b>\n"
" # If this is present and set to 1, there are more similar results beyond \n"
" # those given here. \n"
" <b>&lt;moreSimilar&gt;1&lt;/moreSimilar&gt;</b>\n"
" # This is a standard HTTP MIME content classification of the result. It is \n"
" # not present if the page is text/html. Otherwise, it will be one of the\n"
" # following: text/plain\n"
" # text/xml\n"
" # application/pdf\n"
" # application/msword\n"
" # application/vnd.ms-excel\n"
" # application/mspowerpoint\n"
" # application/postscript\n"
" <b>&lt;contentType&gt;text/plain&lt;/contentType&gt;</b>\n"
" # The documents are all sorted by this score. This score is a generally a\n"
" # product of the WEIGHT of the query term and the COUNT of the query term\n"
" # in this document. The WEIGHT is usually influenced by them term frequency\n"
" # of the query term (rarer terms get more WEIGHT), by the additional weight\n"
" # received by phrases which can be adjusted in the Master Controls, and,\n"
" # possibly, by any user-defined weight in the query (See <a href=\"#weighting\">Weighting Query Terms</a>).\n"
" # This score is normalized by dividing by the maximum\n"
" # score for all documents in the search results and then making it into a\n"
" # percentage, so the score ranges from 0 to 100, and the first result\n"
" # should always have score 100.\n"
" <b>&lt;score&gt;100&lt;/score&gt;</b>\n"
" # This is the absolute score. Useful for merging results from other\n"
" # collections or other search engines.\n"
" <b>&lt;absScore&gt;5132&lt;/absScore&gt;</b>\n"
" # This is the language the page was detected as.\n"
" <b>&lt;language&gt;&lt;![CDATA[English]]&gt;&lt;/language&gt;</b>\n"
" # The character set this page was originally encoded in. \n"
" <b>&lt;charset&gt;&lt;![CDATA[utf-8]]&gt;&lt;/charset&gt;</b>\n"
" <b>&lt;/result&gt;</b>\n"
"\n"
" <b>&lt;result&gt;</b>\n"
" ...\n"
" <b>&lt;/result&gt;</b>\n"
"\n"
" ...\n"
"\n"
" # If the directory has been requested, this node will include the directory\n"
" # structure for the requested category. Typically this is above the results.\n"
" <b>&lt;directory&gt;</b>\n"
" # Category ID for the displayed directory structure.\n"
" <b>&lt;dirId&gt;172&lt;/dirId&gt;</b>\n"
" # Directory path of this category listing.\n"
" <b>&lt;dirName&gt;Health: Dentistry&lt;/dirName&gt;</b>\n"
" # Specifies if the directory listing is displayed in a Right-To-Left format.\n"
" <b>&lt;dirIsRTL&gt;1&lt;/dirIsRTL&gt;</b>\n"
" # Sub-Categories listed as letters meant to be displayed as a letter bar.\n"
" # Each sub-category will be enclosed in a &lt;letterbar&gt; tag.\n"
" <b>&lt;letterbar&gt;&lt;![CDATA[Health/Dentistry/A]]&gt;</b>"
" # Every sub category will include a count of how many urls are listed under it.\n"
" <b>&lt;urlcount&gt;5&lt;urlcount&gt;</b>\n"
" <b>&lt;/letterbar&gt;</b>\n"
" # Normal sub-categories listed in groups. These are listed in order of group\n"
" # and alphabetically within each group. Each sub-category is enclosed in a\n"
" # &lt;narrow2&gt;, &lt;narrow1&gt;, or &lt;narrow&gt; tag.\n"
" <b>&lt;narrow2&gt;&lt;![CDATA[Health/Dentistry/Regional]]&gt;</b>\n"
" <b>&lt;urlcount&gt;0&lt;urlcount&gt;</b>\n"
" <b>&lt;/narrow2&gt;</b>\n"
" <b>&lt;narrow1&gt;&lt;![CDATA[Health/Dentistry/Association]]&gt;</b>\n"
" <b>&lt;urlcount&gt;122&lt;urlcount&gt;</b>\n"
" <b>&lt;/narrow1&gt;</b>\n"
" <b>&lt;narrow&gt;&lt;![CDATA[Health/Dentistry/Children]]&gt;</b>\n"
" <b>&lt;urlcount&gt;24&lt;urlcount&gt;</b>\n"
" <b>&lt;/narrow&gt;</b>\n"
" # Symbolically linked sub-categories physically under a different category.\n"
" # These will be interwoven alphabetically within the respective narrow groups.\n"
" # The name listed before the path is the symbolic name. Each symbolically linked\n"
" # sub-category is enclosed in a &lt;symbolic2&gt;, &lt;symbolic1&gt;, or \n"
" # &lt;symbolic&gt; tag.\n"
" <b>&lt;symbolic2&gt;&lt;![CDATA[Dentophobia:Health/Mental_Health/Disorders/Anxiety/Phobias/Dentophobia]]&gt;</b>\n"
" <b>&lt;urlcount&gt;2&lt;urlcount&gt;</b>\n"
" <b>&lt;/symbolic2&gt;</b>\n"
" <b>&lt;symbolic1&gt;&lt;![CDATA[Dental_Laboratories:Buisness/Healthcare/Products_and_Services/Dentistry/Dental_Laboratories]]&gt;</b>\n"
" <b>&lt;urlcount&gt;71&lt;urlcount&gt;</b>\n"
" <b>&lt;/symbolic1&gt;</b>\n"
" <b>&lt;symbolic&gt;&lt;![CDATA[Products:Shopping/Health/Dental]]&gt;</b>\n"
" <b>&lt;urlcount&gt;71&lt;urlcount&gt;</b>\n"
" <b>&lt;/symbolic&gt;</b>\n"
" # Seperate categories in the directory which are related to this one.\n"
" <b>&lt;related&gt;&lt;![CDATA[Society/Issues/Health/Dentistry]]&gt;</b>\n"
" <b>&lt;urlcount&gt;4&lt;/urlcount&gt;</b>\n"
" <b>&lt;/related&gt;</b>\n"
" # This category in other languages in the directory.\n"
" <b>&lt;altlang&gt;&lt;![CDATA[Basque:World/Euskara/Osasuna/Odontologia]]&gt;</b>\n"
" <b>&lt;urlcount&gt;7&lt;/urlcount&gt;</b>\n"
" <b>&lt;/altlang&gt;</b>\n"
" <b>&lt;/directory&gt;</b>\n"
"\n"
"<b>&lt;/response&gt;</b>\n"
"</pre>\n"
"\n"
""
"<a name=errors></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Error Codes\n"
"</td></tr></table>\n"
"<br><br>\n");
p += gbstrlen(p);
p+=sprintf(p,
//"Gigablast is often interfaced with to add or delete "
//"collections, to inject or delete documents or to obtain "
//"search results.<br>\n"
"<ul>"
"<li>In all cases Gigablast may return an error in the "
"usual HTTP fashion, where the HTTP reply "
"has a format like:<br>\n"
"<b>\n"
"HTTP xxx (yyy)\n"
"</b><br>\n"
"Where <i>xxx</i> is 200 on success and 500 on error and "
"<i>yyy</i> is the textual error message, as printed out "
"by the strerror() function or equivalent. The error "
"message will be from one in the table below."
"<br><br>\n\n"
"<li>When adding or deleting documents via Gigablast's "
"injection interface, errors can also be returned as "
"stated at the end of the <a href=#ireply>Injecting "
"Documents</a> section. In these cases the HTTP status "
"is still 200."
"<br><br>\n\n"
"<li>When obtaining search results via the <a href=#output>"
"XML feed</a>, "
"the error message, and possibly error number, can be "
"contained in the &lt;error&gt; and &lt;errno&gt; tags "
"respectively. When this happens search results are still "
"often presented, with an HTTP status of 200, although the "
"error might have caused "
"the results to be different than what they should have "
"been. For "
"instance, if corrupted data prevented from one particular "
"result from being displayed."
"<br><br>\n\n"
"</ul>"
"<br>\n"
"<table cellpadding=2>"
"<tr><td collspan=2><b>Key</b></td></tr>\n"
"<tr><td>a</td><td>"
"Error used by an add or delete collection "
"operation."
"</td></tr>\n"
"<tr><td>i</td><td>"
"Error used by an inject (or delete) operation."
"</td></tr>\n"
"<tr><td>s</td><td>"
"Error used by a search operation."
"</td></tr>\n"
"</table>\n\n");
p += sprintf(p,"<table cellpadding=2>\n" );
// c errors
char *c = "eeeeee";
p += sprintf(p,"<tr><td colspan=3><b>C error codes</b></td></tr>\n");
for ( long i = 1 ; i <= EMEDIUMTYPE ; i++ ) {
char *b = p;
p += sprintf(p,"<tr bgcolor=#%s><td>%li</td>"
"<td>%s</td><td>",
c,i,strerror(i));
char *s = p;
// is it for injector, search results or addcoll interface?
// use 'i','s','a'
switch ( i ) {
case EPERM :
p += sprintf(p,"a - Did not have permission in the "
"working dir to create/delete the "
"collection subdir.");
break;
case ENOENT:
p += sprintf(p,"a - When creating the subdir for the "
"collection in the working dir, a "
"directory component in pathname "
"does not exist or is a dangling "
"symbolic link.");
break;
case EIO :
p += sprintf(p,"a,i,s - There was an error writing or "
"reading data to or from the disk, most "
"likely due to a hardware failure.");
break;
case EACCES:
p += sprintf(p,"a,i - The working directory, or its "
"parent does not allow write "
"permission.");
break;
case EEXIST:
p += sprintf(p,"a - The collection subdir already "
"exists in the working dir.");
break;
case ENOSPC:
p += sprintf(p,"a,i - There is no room on the drive "
"to write data because the drive is "
"full, or the user's disk quota is "
"exhausted.");
break;
case EBADF:
p += sprintf(p,"a,i,s - Read or write on a bad file "
"descriptor. This should not happen.");
break;
case ENOBUFS :
p += sprintf(p,"a - Collection name limit of %li is "
"exceeded.",(long)MAX_COLLS);
break;
case ENOMEM:
p += sprintf(p,"a,i,s - Out of memory.");
break;
}
// don't print if not used!
if ( s == p ) { p = b; continue; }
if ( c[0] == 'e' ) c = "ffffff";
else c = "eeeeee";
p += sprintf(p,"</td></tr>\n");
}
// gigablast errors
p += sprintf(p,"<tr><td colspan=3><b>Gigablast error codes</b>"
"</td></tr><br><br>\n");
for ( long i = EDUMPFAILED ; i <= ECANCELACK ; i++ ) {
char *b = p;
p += sprintf(p,"<tr bgcolor=#%s><td>%li</td>"
"<td>%s</td><td>",
c,i,mstrerror(i));
char *s = p;
// is it for injector, search results or addcoll interface?
// use 'i','s','a'
switch ( i ) {
case ETRYAGAIN:
p += sprintf(p,"a,i,s - Resources temporarily "
"unavailable.");
break;
case ENOCOLLREC:
p += sprintf(p,"a,i,s - Referenced collection does "
"not exist.");
break;
case EBADENGINEER :
p += sprintf(p,"a - Collection name being added "
"contains an illegal character, or an "
"empty name was provided, or the name "
"is more than %li characters.<br>",
(long)MAX_COLL_LEN);
// SpiderLoop.cpp Msg7.cpp PageInject.cpp
p += sprintf(p,"i - No URL was provided, or URL "
"has no hostname. Or provided URL is "
"currently being injected. Or %li "
"injects are currently in progress.",
(long)MAX_SPIDERS);
break;
//case EURLTOOLONG :
//p += sprintf(p,"i - Injected URL was longer than "
// "%li characters.",(long)MAX_URL_LEN);
//break;
case EBADREPLY:
p += sprintf(p,"i - Received bad internal reply. You "
"should never see this error.");
break;
case EEXIST:
p += sprintf(p,"a - Adding a collection name that "
"already exists.");
break;
case ENOTFOUND:
p += sprintf(p,"i - When looking up old document "
"for injected URL it was not found when "
"it should have been. This is due to "
"data corruption.");
break;
case ENODOCID:
p += sprintf(p,"i - No docids were available to "
"inject the URL. The database has "
"reached its limit.");
break;
case EBUFTOOSMALL:
p += sprintf(p,"i - Injected URL was longer than "
"%li characters. Or the injected "
"document was too big to fit in memory, "
"so consider increasing "
"<titledbMaxTreeMem> in gb.conf."
,(long)MAX_URL_LEN);
break;
case ENOSITEDEFAULT:
p += sprintf(p,"i - The default tagdb*.xml (ruleset) "
"file was not found. "
"Make sure that the ruleset used "
"by tagdb or by the Url Filters page "
"for this url is present in the working "
"dir.");
break;
case EDOCBADCONTENTTYPE:
p += sprintf(p,"i - The URL's file extension is not "
"recognized as an indexable file type.");
break;
case EBADMIME:
p += sprintf(p,"i - The provided HTTP mime (if the "
"<a href=#injecting>hasmime flag</a> "
"was set) was "
"not present or illegal.");
break;
case ENOSLOTS:
p += sprintf(p,"a,i,s - There was a shortage of "
"sockets, please try again.");
break;
case ECLOSING:
p += sprintf(p,"i - Gigablast is shutting down, "
"so the inject failed.");
break;
case EISCLOSING:
p += sprintf(p,"i - Gigablast is shutting down, "
"so the inject failed.");
break;
case EBADTITLEREC:
p += sprintf(p,"i,s - A cached document was "
"corrupt on disk.");
break;
case EMISSINGQUERYTERMS:
p += sprintf(p,"s - A document in the search results "
"did not contain all the query terms.");
break;
case EQUERYTOOBIG:
p += sprintf(p,"s - Query was too long.");
break;
case EQUERYTRUNCATED:
p += sprintf(p,"s - Query was truncated.");
break;
case ETOOMANYOPERANDS:
p += sprintf(p,"s - Query has too many operands.");
break;
case EDNSBAD:
p += sprintf(p,"i - DNS error");
break;
case EDNSREFUSED:
p += sprintf(p,"i - DNS error");
break;
case EDNSTIMEDOUT:
p += sprintf(p,"i - DNS error");
break;
case ESHUTTINGDOWN:
p += sprintf(p,"i - Gigablast is shutting down, "
"so the inject failed.");
break;
// this uses the HTTP interface
//case ENOPERM:
// p += sprintf(p,"s - Your IP is banned from searching "
// "the provided collection. See the "
// "Access page to remove it from the ban "
// "list."
}
if ( p == s ) { p = b; continue; }
if ( c[0] == 'e' ) c = "ffffff";
else c = "eeeeee";
p += sprintf(p,"</td></tr>\n");
}
sprintf ( p ,
"</table><br><br>"
"<a name=weighting></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Weighting Query Terms"
"</td></tr></table>\n"
"<br><br>\n"
"Gigablast allows you to pass in weights for each term in the provided query. The query term weight operator, which is directly inserted into the query, takes the form: <b>[XY]</b>, where <i>X</i> is the weight you want to apply and <i>Y</i> is <b><i>a</i></b> if you want to make it an absolute weight or <b><i>r</i></b> for a relative weight. Absolute weights cancel any weights that Gigablast may place on the query term, like weights due to the term's popularity, for instance. The relative weight, on the other hand, is multiplied by any weight Gigablast may have already assigned."
"<br><br>\n"
"The query term weight operator will affect all query terms that follow it. To turn off the effects of the operator just use the blank operator, <b>[]</b>. Any weight operators you apply override any previous weight operators."
"<br><br>\n"
"The weight applied to a phrase is unaffected by the weights applied to its constituent terms. In order to weight a phrase you must use the <b>[XYp]</b> operator. To turn off the affects of a phrase weight operator, use the phrase blank operator, <b>[p]</b>."
"<br><br>\n"
"Applying a relative weight of 0 to a query term, like <b>[0r]</b>, has the effect of still requiring the term in the search results (if it was not ignored), but not allowing it to contribute to the ranking of the search results. However, when doing a default OR search, if a document contains two such terms, it will rank above a document that only contains one such term. "
"<br><br>\n"
"Applying an absolute weight of 0 to a query term, like <b>[0a]</b>, causes it to be completely ignored and not used for generating the search results at all. But such ignored or devalued query terms may still be considered in a phrase context. To affect the phrases in a similar manner, use the phrase operators, <b>[0rp]</b> and <b>[0ap]</b>."
"<br><br>\n"
"Example queries:"
"<br><br>\n"
"<b>[10r]happy [5rp][13r]day []lucky</b><br>\n"
"<i>happy</i> is weighted 10 times it's normal weight.<br>\n"
"<i>day</i> is weighted 13 times it's normal weight.<br>\n"
"<i>\"day lucky\"</i>, the phrase, is weighted 5 times it's normal weight.<br>\n"
"<i>lucky</i> is given it's normal weight assigned by Gigablast."
"<br><br>\n"
"Also, keep in mind not to use these weighting operators between another query operator, like '+', and its affecting query term. If you do, the '+' or '-' operator will not work."
"<br><br>\n"
""
""
""
""
""
""
""
);
p += gbstrlen ( p );
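// A minimal sketch of how the weight operators look once url-encoded in
// the q= parameter ('[' is %5B and ']' is %5D; illustrative only):
//
//   // [10r]happy [5rp][13r]day []lucky
//   char *q = "q=%5B10r%5Dhappy+%5B5rp%5D%5B13r%5Dday+%5B%5Dlucky";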
sprintf ( p ,
"<a name=requirements></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Hardware Requirements"
"</td></tr></table>\n"
"<br>\n"
""
"At least one computer with 512MB RAM, 10GB of hard drive space and "
"any distribution of Linux with the 2.4.25 kernel or higher. "
"<br><br>\n"
""
"<br>\n"
""
"<a name=perf></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Performance Specifications"
"</td></tr></table>\n"
"<br>\n"
"Gigablast can store 100,000 web pages (each around 25k in size) per "
"gigabyte of disk storage. A typical single-cpu pentium 4 machine can index "
"one to two million web pages per day even when Gigablast is near its maximum "
"document capacity for the hardware. A cluster of N such machines can index "
"at N times that rate."
"<br><br>\n"
""
"<br>\n"
""
"<a name=install></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Installation & Configuration"
"</td></tr></table>\n"
"<br>\n"
""
"<b>1.</b> "
"Create one directory for every Gigablast process you would like to run. Each Gigablast process is also called a <i>host</i>. "
"<br><br>\n"
""
"<b>2.</b>\n"
"Populate each directory with the following files and subdirectories:"
"<br><br>\n"
"<dd>\n"
"<table cellpadding=3>\n"
"<tr><td><b>gb</b></td><td>The Gigablast executable. Contains the web server, the database and the spider. This file is required to run gb.</td></tr>\n"
"<tr><td><b><a href=#hosts>hosts.conf</a></b></td><td>This file describes each host (gb process) in the Gigablast network. Every gb process uses the same hosts.conf file. This file is required to run gb."
""
"<tr><td><b><a href=#config>gb.conf</a></b></td><td>Each gb process is called a <i>host</i> and each gb process has its own gb.conf file. This file is required to run gb."
"<tr><td><b>coll.XXX.YYY/</b></td><td>For every collection there is a subdirectory of this form, where XXX is the name of the collection and YYY is the collection's unique id. Contained in each of these subdirectories is the data associated with that collection.</td></tr>"
"<tr><td><b>coll.XXX.YYY/coll.conf</b></td><td>Each collection contains a configuration file called coll.conf. This file allows you to configure collection specific parameters. Every parameter in this file is also controllable via your the administrative web pages as well.</td></tr>"
"<tr><td><b>trash/</b></td><td>Deleted collections are moved into this subdirectory. A timestamp in milliseconds since the epoch is appended to the name of the deleted collection's subdirectory after it is moved into the trash sub directory. Gigablast doesn't physically delete collections in case it was a mistake.</td></tr>"
"<tr><td><b><a href=#ruleset>tagdbN.xml</a></b></td><td>Several files where N is an integer. The files must be contiguous, starting with an N of 0. Each one of these files is a <a href=#ruleset>ruleset</a> file. This file is required for indexing and deleting documents.</tr>\n"
"<tr><td><b>html/</b></td><td>A subdirectory that holds all the html files and images used by Gigablast. Includes Logos and help files.</tr>\n"
"<tr><td><b>dict/</b></td><td>A subdirectory that holds files used by the spell checker and the GigaBits generator. Each file in dict/ holds all the words and phrases starting with a particular letter. The words and phrases in each file are sorted by a popularity score.</tr>\n"
"<tr><td><b>antiword</b></td><td>Executable called by gbfilter to convert Microsoft Word files to html for indexing.</tr>\n"
"<tr><td><b>.antiword/</b></td><td>A subdirectory that contains information needed by antiword.</tr>\n"
"<tr><td><b>pdftohtml</b></td><td>Executable called by gbfilter to convert PDF files to html for indexing.</tr>\n"
"<tr><td><b>pstotext</b></td><td>Executable called by gbfilter to convert PostScript files to text for indexing.</tr>\n"
"<tr><td><b>ppthtml</b></td><td>Executable called by gbfilter to convert PowerPoint files to html for indexing.</tr>\n"
"<tr><td><b>xlhtml</b></td><td>Executable called by gbfilter to convert Microsoft Excel files to html for indexing.</tr>\n"
"<tr><td><b>gbfilter</b></td><td>Simple executable called by Gigablast with document HTTP MIME header and document content as input. Output is an HTTP MIME and html or text that can be indexed by Gigablast.</tr>\n"
"<tr><td><b><a href=#gbstart>gbstart</a></b></td><td>An optional simple script used to start up the gb process(es) on each computer in the network. Otherwise, iff you have passwordless ssh capability then you can just use './gb start' and it will spawn an ssh command to start up a gb process for each host listed in hosts.conf.</tr>\n"
"</table>\n"
"<br><br>\n"
""
"<b>2.</b> "
"Edit or create the <a href=#hosts>hosts.conf</a> file."
"<br><br>\n"
""
"<b>3.</b> "
"Edit or create the <a href=#config>gb.conf</a> file."
"<br><br>\n"
/*""
"<b>4.</b> "
"Edit or create the <a href=#gbstart>gbstart</a> shell script on each participating computer so it will run all the required gb processes on that computer."
"<br><br>\n"
""
"<b>5.</b> "
"Execute the <a href=#gbstart>gbstart</a> shell script on each participating computer."
"<br><br>\n"
*/
""
"<b>4.</b> "
"Direct your browser to any host listed in the <a href=#hosts>hosts.conf</a> file to begin administration."
"<br><br>\n"
""
"<br>\n"
""
);
//if ( user == USER_MASTER ) p += gbstrlen ( p );
if ( g_users.hasPermission( r, PAGE_MASTER ) ) p += gbstrlen(p);
sprintf ( p ,
"<a name=clustermaint></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Cluster Maintenance"
"</td></tr></table>\n"
"<br>\n"
"For the purposes of this section, we assume the name of the cluster "
"is gf and all hosts in the cluster "
"are named gf*. The Master host of the cluster is gf0. The "
"gigablast working directory is assumed to be /a/ and the /etc/dsh/machines.list file contains only the machine names in the cluster."
"<br><br>"
"<b>To perform operations on all machines on the network:</b>"
"<ul>"
"<li>To setup dsh:"
"<ul>"
"<li> Install the dsh package, on debian it would be:<br>"
" <b> $ apt-get install dsh</b><br>"
"<li> Add the names of all of the machines in the cluster to /etc/dsh/machines.list (newline separated, but does not end in a new line)<br>"
"</ul>"
"<b>To setup dsh on a machine on which we do not have root:</b>"
"<ul>\n"
"<li>cd to the working directory\n"
"<li>Copy /usr/lib/libdshconfig.so.1.0.0 to the working directory.\n"
"<li><b>export LD_PATH=.\n</b>"
"<li>run <b>dsh -r rcp -f filename hostname</b> as a test. Use scp if rcp not available. filename is the file that contains the hostnames to dsh to.\n"
"</ul>\n"
"<li>To use the dsh command."
"<ul>"
"<li>to copy a master configuration file to all hosts:<br>\n"
" <b>$ dsh -a 'scp gf0:/a/coll.conf /a/coll.conf'</b><br>\n"
"<li>to check running processes on all machines concurrently (-c option):<br>\n"
" <b>$ dsh -ac 'ps auxww'</b><br>\n"
"</ul>"
"</ul>"
"<b>To prepare a new cluster or erase an old cluster:</b>"
"<ul>\n"
"<li>Save <b>/a/gb.conf</b>, <b>/a/hosts.conf</b>, and <b>/a/coll.*.*/coll.conf</b> files somewhere besides on /dev/md0 if they exist and you want to keep them.\n"
"<li>cd to a directory not on /dev/md0\n"
"<li>Login as root using <b>su</b>\n"
"<li>Use <b>dsh -ac 'umount /dev/md0'</b> to unmount the working directory. All login shells must exit or cd to a different directory, and all processes with files opened in /dev/md0 must exit for the unmount to work.\n"
"<li>Use <b>dsh -ac 'umount /dev/md0'</b> to unmount the working directory.\n"
"<li>Use <b>dsh -ac 'mke2fs -b4096 -m0 -N20000 -R stride=32 /dev/md0'</b> to revuild the filesystem on the raid. CAUTION!!! WARNING!! THIS COMPLETELY ERASES ALL DATA ON /dev/md0\n"
"<li>Use <b>dsh -ac 'mount /dev/md0'</b> to remount it.\n"
"<li>Use <b>dsh -ac 'mkdir /mnt/raid/a ; chown mwells:mwells /mnt/raid/a</b> to create the 'a' directory and let user mwells, or other search engine administrator username, own it.\n"
"<li>Recopy over the necessary gb files to every machine.\n"
"<li>\n"
"</ul>\n"
"<br>\n"
"<b>To test a new gigablast executable:</b>"
"<ul>\n"
"<li>Change to the gigablast working directory.<br>"
" <b>$ cd /a</b>"
"<li>Run gb stop on the running gb executable on gf0.<br>"
" <b>$ gb stop</b>"
"<li>Wait until all hosts have stopped and saved their data. "
"(the following line should not print anything)<br>"
" <b>$ dsh -a 'ps auxww' | grep gb</b>"
"<li>Move the current gb executable to gb.SAVED.<br>"
" <b>$ mv gb gb.SAVED </b>"
"<li>Copy the new executable onto gf0<br>"
" <b>$ scp gb user@gf0:/a/</b>"
"<li>Install the executable on all machines.<br>"
" <b>$ gb installgb</b><br>"
"<li>This will copy the gb executable to all hosts. You"
" must wait until all of the scp processes have completed"
" before starting the gb process. Run ps to verify that all of"
" the scp processes have finished.<br>"
" <b>$ ps auxww</b>"
"<li>Run gb start<br>"
" <b>$ gb start </b>"
"<li>As soon as all of the hosts have started, you can use the "
"web interface to gigablast.<br>"
"</ul>\n"
"<b>To switch the live cluster from the current (cluster1) to another"
" (cluster2):</b>"
"<ul>\n"
"<li>Ensure that the gb.conf of cluster2 matches that of cluster1,"
" excluding any desired changes.<br>"
"<li>Ensure that the coll.conf for each collection on cluster2 matches those"
" of cluster1, excluding any desired changes.<br>"
"<li>Thoroughly test cluster2 using the blaster program.<br>"
"<li>Test duplicate queries between cluster1 and cluster2 and ensure results"
" properly match, with the exception of any known new changes.<br>"
"<li>Make sure port 80 on cluster2 is directing to the correct port for gb.<br>"
" <b>$ iptables -t nat -A PREROUTING -i eth0 -p tcp -m tcp --dport 80 -j"
" DNAT --to-destination 2.2.2.2:8000</b><br>"
"<li>Test that cluster2 works correctly by accessing it from a browser using"
" only it's IP in the address.<br>"
"<li>For both primary and secondary DNS servers, perform the following:<br>"
"<ul><li>Edit /etc/bind/db.&lt;hostname&gt; (i.e. db.gigablast.com)<br>"
" <b>$ vi /etc/bind/db.gigablast.com</b><br>"
" <li>Change lines using cluster1's ip to have cluster2's ip. It is"
" recommended that comment out the old line with a ; at the front.<br>"
" <b>i.e.: \"www&nbsp;&nbsp;IN&nbsp;&nbsp;A&nbsp;&nbsp;1.1.1.1\" >>"
" \"www&nbsp;&nbsp;IN&nbsp;&nbsp;A&nbsp;&nbsp;2.2.2.2\"</b><br>"
" <li>Edit /etc/bind/db.64<br>"
" <b>$ vi /etc/bind/db.64</b><br>"
" <li>Change lines with cluster1's last IP number to have cluster2's"
" last IP number.<br>"
" <b>i.e.: \"1&nbsp;&nbsp;IN&nbsp;&nbsp;PTR&nbsp;&nbsp;"
"www.gigablast.com\" >> \"2&nbsp;&nbsp;IN&nbsp;&nbsp;PTR&nbsp;&nbsp;"
"www.gigablast.com\"</b><br>"
" <li>Restart named.<br>"
" <b>$ /etc/rc3.d/S15bind9 restart</b><br>"
"</ul>"
"<li>Again, test that cluster2 works correctly by accessing it from a browser"
" using only it's IP in the address.<br>"
"<li>Check log0 of cluster2 to make sure it is recieving queries.<br>"
" <b>$ tail -f /a/log0</b><br>"
"<li>Allow cluster1 to remain active until all users have switched over to"
" cluster2.<br>"
"</ul><br>\n"
);
//if ( user == USER_MASTER ) p += gbstrlen ( p );
if ( g_users.hasPermission( r, PAGE_MASTER ) ) p += gbstrlen(p);
sprintf ( p ,
"<a name=trouble></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Troubleshooting"
"</td></tr></table>\n"
"<br>\n"
""
"<a name=disaster></a>\n"
"<b>A host in the network crashed. How do I temporarily decrease query latency on the network until I get it up again?</b><br>"
"You can go to the <i>Search Controls</i> page and cut all nine tier sizes in half. This will reduce search result recall, but should cut query latency times in half for slower queries until the crashed host is recovered. "
"<br><br>"
"<b>A host in the network crashed. What is the recovery procedure?</b><br>"
"First determine if the host's crash was clean or unclean. It was clean "
"if the host was able to save all data in memory before it crashed. If the "
"log ended with <i>allExit: dumping core after saving</i> then the crash "
"was clean, otherwise it was not."
"<br><br>"
"If the crash was clean then you can simply restart the crashed host by typing"
" <b>gb start <i>i</i></b> where <i>i</i> is the hostId of the crashed host. "
"However, if the crash was not clean, like in the case of a sudden power "
"outtage, then in order to ensure no data gets lost, you must copy the data "
"of the crashed host's twin. "
"If it does not have a twin then there may be some data loss and/or "
"corruption. In that case try reading the section below, <i>How do I minimize "
"the damage after an unclean crash with no twin?</i>, but you may be better "
"off starting the index build from "
"scratch. To recover from an unclean crash using the twin, follow the steps "
"below: "
"<br><br>"
"a. Click on 'all spiders off' in the 'master controls' of host #0, or "
"host #1 if host #0 was the host that crashed.<br>"
"b. If you were injecting content directly into Gigablast, stop.<br>"
"c. Click on 'all just save' in the 'master controls' of host #0 or host #1 "
"if host #0 was the one that crashed.<br>"
"d. Determine the twin of the crashed host by looking in the "
"<a href=\"#hosts\">hosts.conf</a> file. "
"The twin will have the same group number as the crashed host.<br>"
"e. Recursively copy the working directory of the twin to a spare host "
" using rcp since it is much faster than scp.<br>"
"f. Restart the crashed host by typing <b>gb start <i>i</i></b> where "
"<i>i</i> is the hostId of the crashed host. If it is not restartable, then "
"skip this step.<br>"
"g. If the crashed host was restarted, wait for it to come back up. Monitor "
"another host's <i>hosts</i> table to see when it is up, or watch the log of "
"the crashed host.<br>"
"h. If the crashed host was restarted, wait a minute for it to absorb all "
"of the data add requests that may still be lingering. Wait for all hosts' "
"<i>spider queues</i> of urls currently being spidered to be empty of "
"urls.<br>"
"i. Perform another <i>all just save</i> command to relegate any new data "
"to disk.<br>"
"j. After the copy completes edit the hosts.conf on host #0 and replace the "
"ip address of the crashed host with that of the spare host.<br>"
"k. Do a <b>gb stop</b> to safely shut down all hosts in the network.<br>"
"l. Do a <b>gb installconf</b> to propagate the hosts.conf file from host #0 "
"to all other hosts in the network (including the spare host, but not the "
"crashed host)<br>"
"m. Do a <b>gb start</b> to bring up all hosts under the new hosts.conf file."
"<br>"
"n. Monitor all logs for a little bit by doing <i>dsh -ac 'tail -f /a/log? "
"/a/log\?\?'</i><br>"
"o. Check the <i>hosts</i> table to ensure all hosts are up and running.<br>"
"<br><br>"
"<b>How do I minimize the damage after an unclean crash with no twin?</b><br>"
"You may never be able to get the index 100%% back into shape right now, but "
"in the near future there may be some technology that allows gigablast to "
"easily recover from these situations. For now though, "
"2. Try to determine the last url that was indexed and *fully* saved to disk. "
"Every time you index a url some data is added to all of these databases: "
"checksumdb, indexdb, spiderdb, titledb and tfndb. These databases all have "
"in-memory data that is periodically dumped to disk. So you must determine "
"the last time each of these databases dumped to disk by looking at the "
"timestamp on the corresponding files in the appropriate collection "
"subdirectories contained in the working directory. If tfndb was "
"dumped to disk the longest time ago, then use its timestamp "
"to indicate when the last url was successfully added or injected. You might "
"want to subtract thirty minutes from that timestamp to make sure because it "
"is really the time that that file <b>started</b> being dumped to disk that "
"you are after, and that timestamp represents the time of the last write to "
"that file. Now you can re-add the potentially missing urls from that time "
"forward and get a semi-decent recovery."
"<br><br>"
"<b>Gigablast is slow to respond to queries. How do I speed it up?</b><br>"
"a. If you see long purple lines in the Performance graph when "
"Gigablast is slow then that "
"means Gigablast is operating on a slow network OR your tier sizes, "
"adjustable on the Search Controls page, are way "
"too high so that too much data is clogging the network. "
"If your tier sizes are at the default values or lower, then the problem may "
"be that the bandwidth between one gigablast host and another is below "
"the required 1000Mbps. "
"Try doing a 'dmesg | grep Mbps' to see what speed your card is operating at. "
"Also try testing the bandwidth between hosts using the thunder program or "
"try copying a large file using rcp and timing it. Do not use scp since it is "
"often bottlenecked on the CPU due to the encryption that it does. If your "
"gigabit card is operating at 100Mbps that can sometimes be fixed by "
"rebooting. I've found that there is about a 20%% chance that the reboot "
"will make the card come back to 1000Mbps."
"<br><br>"
"b. If you see lots of long black lines on the Performance graph then that "
"means your disk is slowing everything down. Make sure that if you are doing "
"realtime queries that you do not have too many big indexdb files. If you "
"tight merge everything it should fix that problem. Otherwise, consider "
"getting a raid level 0 and faster disks. Perhaps the filesystem is "
"severly fragmented."
"Or maybe your query traffic is repetetive. If the queries are sorted "
"alphabetically, or you have many duplicate queries, then most of the "
"workload might be falling on one particular host in the network, thus "
"bottle-necking everything."
"<br><br>"
"<b>I get different results for the XML feed "
"(raw=X) as compared to the HTML feed. "
"What is going on?</b><br>"
" Try adding the &rt=1 cgi parameter to the "
"search string to tell Gigablast to return real "
"time results."
"rt is set to 0 by default for the XML feed, but "
"not for the HTML feed. That means Gigablast will "
"only look at the root indexdb file when looking up "
"queries. Any newly added pages will be indexed "
"outside of the root file until a merge is done. "
"This is done for performance reasons. You can enable "
"real time look ups by adding &rt=1 to the search "
"string. Also, in your search controls there are "
"options to enable or disable real time lookups for "
"regular queries and XML feeds, labeled as \"restrict "
"indexdb for queries\" and \"restrict indexdb for "
"xml feed\". Make sure both regular queries and "
"xml queries are doing the same thing when comparing "
"results."
"<br>"
"<br>"
"Also, you need to look at the tier sizes at the "
"top of the Search Controls page. The tier sizes "
" (tierStage0, tierStage1, ...) listed for the "
"raw (XML feed) queries needs to match non-raw "
"in order to get exactly the same results. Smaller "
"tier sizes yield better performance but yield "
"less search results."
"<br><br>"
"<b>The spider is on but no urls are showing up in the Spider Queue table "
"as being spidered. What is wrong?</b><br>"
"<table width=100%%>"
"<tr><td>1. Set <i>log spidered urls</i> to YES on the <i>log</i> page. Then "
"check the log to see if something is being logged."
"</td></tr>"
"<tr><td>2. Check the <i>master controls</i> page for the following:<br>"
" &nbsp; a. the <i>spider enabled</i> switch is set to YES.<br>"
" &nbsp; b. the <i>spider max pages per second</i> control is set "
"high enough.<br>"
" &nbsp; c. the <i>spider max kbps</i> control is set high enough.</td></tr>"
"</td></tr>"
"<tr><td>3. Check the <i>spider controls</i> page for the following:<br>"
" &nbsp; a. the collection you wish to spider for is selected (in red).<br>"
" &nbsp; a. the <i>old</i> or <i>new spidering</i> is set to YES.<br>"
" &nbsp; b. the appropriate <i>old</i> and <i>new spider priority</i> "
"checkboxes are checked.<br>"
" &nbsp; c. the <i>spider start</i> and <i>end times</i> are set "
"appropriately.<br>"
" &nbsp; d. the <i>use current time</i> control is set correctly.<br>"
" &nbsp; e. the <i>spider max pages per second</i> control is set "
"high enough.<br>"
" &nbsp; f. the <i>spider max kbps</i> control is set high enough.</td></tr>"
"<tr><td>3. If you have urls from only a few domains then the <i>same domain "
"wait</i> or <i>same ip wait</i> controls could be limiting the spidering "
"of the urls such that you do not see any in the Spider Queue table. If the "
"indexed document count on the home page is increasing then this may be the "
"case. Even if the count is not increasing, it may still be the case if the "
"documents all have errors, like 404 not found.</td></tr>"
"<tr><td>"
"4. Make sure you have urls to spider by running 'gb dump s <collname>' "
"on the command line to dump out spiderdb. See 'gb -h' for the help menu and "
"more options.</td></tr>"
"</table>"
"<br><br>"
"<b>The spider is slow.</b><br>"
"<table width=100%%>"
"<tr><td>In the current spider queue, what are the statuses of each url? If "
"they are mostly \"getting cached web page\" and the IP address column is "
"mostly empty, then Gigablast may be bogged down looking up the cached web "
"pages of each url in the spider queue only to discover it is from a domain "
"that was just spidered. This is a wasted lookup, and it can bog things down "
"pretty quickly when you are spidering a lot of old urls from the same "
"domain. "
"Try setting <i>same domain wait</i> and <i>same ip wait</i> both to 0. This "
"will pound those domain's server, though, so be careful. Maybe set it to "
"1000ms or so instead. We plan to fix this in the future."
"</td></tr>"
"</table>"
"<br><br>"
"<b>The spider is always bottlenecking on <i>adding links</i>.</b><br>\n"
"<table width=100%%>\n"
"<tr><td>Try increasing the &lt;tfnbMaxPageCacheMem&gt; in the gb.conf for all hosts in the cluster to minimize the <i>disk seeks</i> into tfndb as seen on the Stats page. Stop all gb processes then use <i>./gb installconf</i> to distribute the gb.conf to all hosts in the cluster. You migh also try decreasing the size of the url filters table, every regular expression in that table is consulted for every link added and it can really block the cpu.\n"
"</td></tr>\n"
"</table>\n"
"<br><br>"
);
//if ( user == USER_MASTER ) p += gbstrlen ( p );
if ( g_users.hasPermission( r, PAGE_MASTER ) ) p += gbstrlen(p);
p += sprintf ( p ,
"<a name=security></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>The Security System\n"
"</td></tr></table>\n"
"<br>\n"
"Every request sent to the Gigablast server is assumed to come from one "
"of four types of users. A public user, a spam assassin, a collection admin, "
"or a master admin. "
"A collection admin has control over the controls corresponding to a "
"particular collection. A spam assassin has control over even fewer controls "
"over a particular collection in order to remove pages from it. "
"A master admin has control over all aspects and all collections. "
"<br><br>"
"To verify a request is from an admin or spam assassin Gigablast requires "
"that the request contain a password or come from a listed IP. "
"To maintain these lists of passwords and IPs for the master admin, "
"click on the \"security\" tab. To maintain them for a collection admin or "
"for a spam assassin, click on the \"access\" tab for that collection. "
"Alternatively, the master passwords and IPs can be edited in the gb.conf "
"file in the working dir and collection admin passwords and IPs can be edited "
"in the coll.conf file in the collections subdirectory in the working dir. "
"<br><br>"
"To add a further layer of security, Gigablast can server all of its pages "
"through the https interface. By changing http:// to https:// and using the "
"SSL port you specified in hosts.conf, all requests and responses will be "
"made secure. "
"<br><br>\n"
);
p += sprintf ( p ,
"<a name=build></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Building an Index\n"
"</td></tr></table>\n"
"<br>\n"
"\n"
"<b>1.</b> Determine a collection name for your index. You may just want to use the default, unnamed collection. Gigablast is capable of handling many sub-indexes, known as collections. Each collection is independent of the other collections. You can add a new collection by clicking on the <b>add new collection</b> link on the <a href=\"/admin/spider\">Spider Controls</a> page."
"<br><br>\n"
"\n"
"<b>2.</b> Describe the set of URLs you would like to index to Gigablast by inputting <a href=\"http://www.phpbuilder.com/columns/dario19990616.php3\">regular expressions</a> into Gigablast's \n"
"<a href=\"/admin/filters\">URL Filters page</a>. \n"
"On that page you can tell Gigablast how often to \n"
"re-index a URL in order to pick up any changes to that URL's content.\n"
"You can assign <a href=#ruleset>rulesets</a> and spider priorities to URLs as well. Furthermore, you can assign a default ruleset and spider priority for all URLs not conforming to any regular expressions you entered.\n"
"<br><br>\n"
"\n"
"<b>3.</b> Test your Regular Expressions. Once you've submitted your \n"
"regular expressions try entering some URLs in the second pink box, entitled,\n"
"<i>URL Filters Test</i> on the <a href=\"/admin/filters\">URL Filters page</a>. This will help you make sure that you've entered your regular expressions correctly.\n"
"<br><br>\n"
"\n"
"<b>4.</b> Enable \"add url\". By enabling the add url interface you will be able to tell Gigablast to index some URLs. You must make sure add url is enabled on the <a href=\"/master\">Master Controls</a> page and also on the <a href=\"/admin/spider\">Spider Controls</a> page for your collection. If it is disabled on the Master Controls page then you will not be able to add URLs for *any* collection.\n"
"<br><br>\n"
"\n"
"<b>5.</b> Submit some seed URLs. Go to the <a href=\"/addurl\">add url \n"
"page</a> for your collection and submit some URLs you'd like to put in your\n"
"index. Usually you want these URLs to have a lot of outgoing links that \n"
"point to other pages you would like to have in your index as well. Gigablast's\n"
"spiders will follow these links and index whatever web pages they point to,\n"
"then whatever pages the links on those pages point to, ad inifinitum. But you\n"
"must make sure that <b>spider links</b> is enabled on the <a href=\"/admin/spider\">Spider Controls</a> page for your collection.\n"
"<br><br>\n"
"\n"
"<b>5.a.</b> Check the spiders. You can go to the <b>Spider Queue</b> page to "
"see what urls are currently being spidered from all collections, as well as see what urls exist in various priority queues, and what urls are cached from various priority queues. If you urls are not being spidered check to see if they are in the various spider queues. Urls added via the add url interface usually go to priority queue 5 by default, but that may have been changed on the Spider Controls page to another priority queue. And it may have been added to any of the hosts' priority queue on the network, so you may have to check each one to find it."
"<br><br>\n"
"If you do not see it on any hosts you can do an <b>all just save</b> in the Master Controls on host #0 and then dump spiderdb using gb's command line dumping function, <b>gb dump s 0 -1 1 -1 5</b> (see gb -h for help) on every host in the cluster and grep out the url you added to see if you can find it in spiderdb."
"<br><br>"
"Then make sure that your spider start and end time on the Spider Controls encompas, and old or new spidering is enabled, and spidering is enabled for that priority queue. If all these check out the url should be spidered asap."
"<br><br>\n"
"<b>6.</b> Regulate the Spiders. Given enough hardware, Gigablast can index \n"
"millions of pages PER HOUR. If you don't want Gigablast to thrash your or\n"
"someone else's website\n"
"then you should adjust the time Gigablast waits between page requests to the\n"
"same web server. To do this go to the \n"
"<a href=\"/admin/spider\">Spider Controls</a> page for your collection and set\n"
"the <b>same domain wait</b> and <b>same ip wait</b> values to how long you want Gigablast to wait in between page requests to the same domain or the same IP address respectively. This value is in milliseconds (ms). There are 1000"
"milliseconds in one second. That is, 1000 ms equals 1 second.\n"
"You must then click on the\n"
"<i>update</i> button at the bottom of that page to submit your new value.\n"
"<br><br>\n"
"\n"
"<b>7.</b> Turn on the new spider. Go to the \n"
"<a href=\"/admin/spider\">Spider Controls</a> page for your collection and \n"
"turn on <b>enable new spidering</b>. It should be at the top of the \n"
"controls table. You may also have to turn on spidering from the \n"
"<a href=\"/master\">Master Controls</a> page which is a master switch for all\n"
"collections.\n"
"<br><br>\n"
"\n"
"<b>8.</b> Monitor the spider's progress. By visiting the \n"
"<a href=\"/admin/spiderdb\">Spider Queue</a> page for your collection you can see what\n"
"URLs are currently being indexed in real-time. Gigablast.com currently has 32"
"hosts and each host spiders different URLs. You can easily switch between \n"
"these hosts by clicking on the host numbers at the top of the page.\n"
"<br><br><br>\n"
"\n"
"\n"
"\n"
"\n"
"<a name=spider></>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>The Spider\n"
"</td></tr></table>\n"
"<br>\n"
"<b>Robots.txt</b>\n"
"<br><br>\n"
"The name of Gigablast's spider is Gigabot. \n"
"Gigabot/1.0 is used for the User-Agent field of all HTTP mime headers that Gigablast transmits. \n"
"Gigabot respects the <a href=/spider.html>robots.txt convention</a> (robot exclusion) as well as supporting the meta noindex, noarchive and nofollow meta tags. You can tell Gigabot to ignore robots.txt files on the <a href=\"/admin/spider\">Spider Controls</a> page.\n"
"<br><br>\n"
"<b><a name=classifying>Classifying URLs</a></b> \n"
"<br><br>\n"
"You can specify different indexing and spider parameters on a per URL basis by one or more of the following methods:\n"
"<br><br>\n"
"<ul>\n"
"<li>Using the <a href=\"/master/tagdb\">tagdb interface</a>, you can assign a <a href=#ruleset>ruleset</a> to a set of sites. All you do is provide Gigablast with a list of sites and the ruleset to use for those sites.\n"
"You can enter the sites via the <a href=\"/master/tagdb\">HTML form</a> or you can provide Gigablast with a file of the sites. Each file must be limited to 1 Megabyte, but you can add hundreds of millions of sites. \n"
"Sites can be full URLs, hostnames, domain names or IP addresses.\n"
"If you add a site which is just a canonical domain name with no explicit host name, like gigablast.com, then any URL with the same domain name, regardless of its host name will match that site. That is, \"hostname.gigablast.com\" will match the site \"gigablast.com\" and therefore be assigned the associated ruleset.\n"
"Sites may also use IP addresses instead of domain names. If the least significant byte of an IP address that you submit to tagdb is 0 then any URL with the same top 3 IP bytes as that IP will be considered a match.\n"
"<li>You can specify a regular expression to describe a set of URLs using the interface on the <a href=\"/admin/filters\"></a>URL filters</a> page. You can then assign a <a href=#ruleset>ruleset</a> that describes how to spider those URLs and how to index their content. Currently, you can also explicitly assign a spider frequency and spider queue to matching URLs. If these are specified they will override any values in the ruleset."
"</ul>\n"
"If the URL being spidered matches a site in tagdb then Gigablast will use the corresponding ruleset from that and will not bother searching the regular expressions on the <a href=\"/admin/filters\"></a>URL filters</a> page.\n"
"<br><br>\n"
"<a name=\"spiderqueue\">\n"
"<b>Spider Queues</b>\n"
"<br><br>\n"
"Gigablast uses spider queues to hold and partition URLs. Each spider queue has an associated priority which ranges from 0 to 7. Furthermore, each queue is either denoted as <i>old</i> or <i>new</i>. Old spider queues hold URLs whose content is currently in the index. New spider queues hold URLs whose content is not in the index. The priority of a URL is the same as the priority of the spider queue to which it belongs. You can explicitly assign the priority of a URL by specifying it in a <a href=#ruleset>ruleset</a> to which that URL has been assigned or by assigning it on the <a href=\"/admin/filters\"></a>URL filters</a> page.\n"
"<br><br>\n"
"On the <a href=\"/admin/spider\">Spider Controls</a> page you can toggle the spidering of individual spider queues as well as link harvesting. More control on a per queue basis will be available soon, perhaps including the ability to assign a ruleset to a spider queue.\n"
"<br><br>\n"
"The general idea behind spider queues is that it allows Gigablast to prioritize its spidering. If two URLs are overdue to be spidered, Gigabot will download the one in the spider queue with the highest priority before downloading the other. If the two URLs have the same spider priority then Gigabot will prefer the one in the new spider queue. If they are both in the new queue or both in the old queue, then Gigabot will spider them based on their scheduled spider time.\n"
"<br><br>\n"
"Another aspect of the spider queues is that they allow Gigabot to perform depth-first spidering. When no priority is explicitly given for a URL then Gigabot will assign the URL the priority of the \"linker from which it was found\" minus one.\n"
"<br><br>\n"
"<b>Custom Filters</b>\n"
"<br><br>\n"
"You can write your own filters and hook them into Gigablast. A filter is an executable that takes an HTTP reply as input through stdin and makes adjustments to that input before passing it back out through stdout. The HTTP reply is essentially the reply Gigabot received from a web server when requesting a URL. The HTTP reply consists of an HTTP MIME header followed by the content for the URL.\n"
"\n"
"<br><br>\n"
"Gigablast also appends <b>Last-Indexed-Date</b>, <b>Collection</b>, <b>Url</b> and <b>DocId</b> fields to the MIME in order to supply your filter with more information. The Last-Indexed-Date is the time that Gigablast last indexed that URL. It is -1 if the URL's content is currently not in the index.\n"
"<br><br>\n"
"You can specify the name of your filter (an executable program) on the <a href=\"/admin/spider\">Spider Controls</a> page. After Gigabot downloads a web page it will write the HTTP reply into a temporary file stored in the /tmp directory. Then it will pass the filename as the first argument to the first filter by calling the system() function. popen() was used previously but was found to be buggy under Linux 2.4.17. Your program should send the filtered reply back out through stdout.\n"
"<br><br>\n"
"You can use multiple filters by using the pipe operator and entering a filter like \"./filter1 | ./filter2 | ./filter3\". In this case, only \"filter1\" would receive the temporary filename as its argument, the others would read from stdin.\n"
"<br><br>\n"
"<a name=quotas></>\n"
"<b>Document Quotas</b>\n"
"<br><br>\n"
"You can limit the number of documents on a per site basis. By default "
"the site is defined to be the full hostname of a url, like, "
"<i>www.ibm.com</i>. However, using tagdb you can define the site as a "
"domain or even a subfolder within the url. By adjusting the "
"&lt;maxDocs&gt; "
"parameter in the <a href=#ruleset>ruleset</a> for a particular url you "
"can control how many documents are allowed into the index from that site. "
"Additionally, the quotaBoost tables in the same ruleset file allow you to "
"influence how a quota is changed based on the quality of the url being "
"indexed and the quality of its root page. Furthermore, the Spider Controls "
"allow you to turn quota checking on and off for old and new documents. "
"<br><br>"
"The quota checking routine quickly obtains a decent approximation of how "
"many documents a particular site has in the index, but this approximation "
"becomes "
"higher than the actual count as the number of big indexdb files increases, "
"so you may want to keep &lt;indexdbMinFilesToMerge&gt; in "
"<a href=#config>gb.conf</a> "
"down to a value of around "
"five or so to ensure a half way decent approximation. Typically you can "
"excpect to be off by about 1000 to 2000 documents for every indexdb file "
"you have."
"<br><br>\n"
"<br><br>\n"
"\n"
"\n"
"\n"
"\n"
"\n"
"\n"
"<a name=injecting></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Injecting Documents"
"</td></tr></table>\n"
"<br>\n"
"<b>Injection Methods</b>\n"
"<br><br>\n"
"Gigablast allows you to inject documents directly into the index by using the command <b>gb [-c &lt;<a href=#hosts>hosts.conf</a>&gt;] &lt;hostId&gt; --inject &lt;file&gt;</b> where &lt;file&gt; must be a sequence of HTTP requests as described below. They will be sent to the host with id &lt;hostId&gt;."
"<br><br>\n"
"You can also inject your own content a second way, by using the <a href=\"/admin/inject\">Inject URL</a> page. "
"<br><br>\n"
"Thirdly you can use your own program to feed the content directly to Gigablast using the same form parameters as the form on the Inject URL page."
"<br><br>\n"
"In any of the three cases, be sure that url injection is enabled on the <a href=/master>Master Controls</a> page."
""
"<br><br><br>\n"
"<b>Input Parameters</b>\n"
"<br><br>\n"
"When sending an injection HTTP request to a Gigablast server, you may optionally supply an HTTP MIME in addition to the content. This MIME is treated as if Gigablast's spider downloaded the page you are injecting and received that MIME. If you do supply this MIME you must make sure it is HTTP compliant, preceeds the actual content and ends with a \"\r\n\r\n\" followed by the content itself. The smallest mime header you can get away with is \"HTTP 200\r\n\r\n\" which is just an \"OK\" reply from an HTTP server."
""
"<br><br>\n"
"The cgi parameters accepted by the /inject URL for injecting content are the following: (<b>remember to map spaces to +'s, etc.</b>)"
"<br><br>\n"
"\n"
"<table cellpadding=4>\n"
"\n"
"<tr><td bgcolor=#eeeeee>u=X</b></td>\n"
"<td bgcolor=#eeeeee>X is the url you are injecting. This is required.</td></tr>\n"
"\n"
"<tr><td>c=X</b></td>\n"
"<td>X is the name of the collection into which you are injecting the content. This is required.</td></tr>\n"
"\n"
"<tr><td bgcolor=#eeeeee>delete=X</b></td>\n"
"<td bgcolor=#eeeeee>X is 0 to add the URL/content and 1 to delete the URL/content from the index. Default is 0.</td></tr>\n"
"\n"
"<tr><td>ip=X</b></td>\n"
"<td>X is the ip of the URL (i.e. 1.2.3.4). If this is ommitted or invalid then Gigablast will lookup the IP, provided <i>iplookups</i> is true. But if <i>iplookups</i> is false, Gigablast will use the default IP of 1.2.3.4.</td></tr>\n"
"\n"
"<tr><td bgcolor=#eeeeee>iplookups=X</b></td>\n"
"<td bgcolor=#eeeeee>If X is 1 and the ip of the URL is not valid or provided then Gigablast will look it up. If X is 0 Gigablast will never look up the IP of the URL. Default is 1.</td></tr>\n"
"\n"
"<!--<tr><td>isnew=X</b></td>\n"
"<td>If X is 0 then the URL is presumed to already be in the index. If X is 1 then URL is presumed to not be in the index. Omitting this parameter is ok for now. In the future it may be put to use to help save disk seeks. Default is 1.</td></tr>-->\n"
"\n"
"<tr><td>dedup=X</b></td>\n"
"<td>If X is 1 then Gigablast will not add the URL if another already exists in the index from the same domain with the same content. If X is 0 then Gigablast will not do any deduping. Default is 1.</td></tr>\n"
"\n"
"<tr><td bgcolor=#eeeeee>rs=X</b></td>\n"
"<td bgcolor=#eeeeee>X is the number of the <a href=#ruleset>ruleset</a> to use to index the URL and its content. It will be auto-determined if <i>rs</i> is omitted or <i>rs</i> is -1.</td></tr>\n"
"\n"
"<tr><td>quick=X</b></td>\n"
"<td>If X is 1 then the reply returned after the content is injected is the reply described directly below this table. If X is 0 then the reply will be the HTML form interface.</td></tr>\n"
"\n"
"<tr><td bgcolor=#eeeeee>hasmime=X</b></td>\n"
"<td bgcolor=#eeeeee>X is 1 if the provided content includes a valid HTTP MIME header, 0 otherwise. Default is 0.</td></tr>\n"
"\n"
"<tr><td>content=X</b></td>\n"
"<td>X is the content for the provided URL. If <i>hasmime</i> is true then the first part of the content is really an HTTP mime header, followed by \"\r\n\r\n\", and then the actual content.</td></tr>\n"
"\n"
"<tr><td bgcolor=#eeeeee>ucontent=X</b></td>\n"
"<td bgcolor=#eeeeee>X is the UNencoded content for the provided URL. Use this one <b>instead</b> of the <i>content</i> cgi parameter if you do not want to encode the content. This breaks the HTTP protocol standard, but is convenient because the caller does not have to convert special characters in the document to their corresponding HTTP code sequences. <b>IMPORTANT</b>: this cgi parameter must be the last one in the list.</td></tr>\n"
"\n"
"</table>\n"
"\n"
"<br><br>\n"
"<b>Sample Injection Request</b> (line breaks are exclusively specified by \r\n sequences):<br>\n"
"\n"
"<pre>\n"
"POST /inject HTTP/1.0\r\n\n"
"Content-Length: 291\r\n\n"
"Content-Type: text/html\r\n\n"
"Connection: Close\r\n\n"
"\r\n\n"
"u=myurl&c=&delete=0&ip=4.5.6.7&iplookups=0&dedup=1&rs=7&quick=1&hasmime=1&ucontent=HTTP 200\r\n\n"
"Last-Modified: Sun, 06 Nov 1994 08:49:37 GMT\r\n\n"
"Connection: Close\r\nContent-Type: text/html\r\n\r\n<title>Overview</title>\n"
"<html>This is the unencoded content of the page we are injecting.</html>\n"
"</pre>\n"
"<br>\n"
"<b>The Reply</b>\n"
"<br><br>\n"
"<a name=ireply></a>"
"The reply is always a typical HTTP reply, but if you defined <i>quick=1</i> then the *content* (the stuff below the returned MIME) of the HTTP reply to the injection request is of the format:"
"<br>\n"
"<br>\n"
"&lt;X&gt; docId=&lt;Y&gt; hostId=&lt;Z&gt;"
"<br>\n"
"<br>\n"
"OR"
"<br>\n"
"<br>\n"
"&lt;X&gt; &lt;error message&gt;"
"<br>\n"
"<br>\n"
"Where &lt;X&gt; is a string of digits in ASCII, corresponding to the error code. X is 0 on success (no error) in which case it will be followed by a <b>long long</b> docId and a hostId, which corresponds to the host in the <a href=#hosts>hosts.conf</a> file that stored the document. Any twins in its group will also have copies. If there was an error then X will be greater than 0 and may be followed by a space then the error message itself. If you did not define <i>quick=1</i>, then you will get back a response meant to be viewed on a browser."
"<br>\n"
"<br>\n"
" Make sure to read the complete reply before spawning another request, lest Gigablast become flooded with requests."
"<br>\n"
"<br>\n"
"Example success reply: <b>0 docId=123543 hostId=0</b><br>\n"
"Example error reply: <b>12 Cannot allocate memory</b>\n"
"<br>\n"
"<br>\n"
"See the <a href=#errors>Error Codes</a> for all errors, but the following\n"
"errors are most likely:<br>\n"
"<table cellpadding=2>\n"
"<tr><td><b>%5li %s</b></td>"
"<td>There was a shortage of memory to properly "
"process the request.</td></tr>\n"
"<tr><td><b>%05li %s</b></td>"
"<td>A cached page was not found when it should have "
"been, likely due to corrupt data on disk.</td></tr>\n"
"<tr><td><b>%5li %s</b></td>"
"<td>There was a shortage of resources so the "
"request should be repeated.</td></tr>\n"
"<tr><td><b>%5li %s</b></td>"
"<td>The injection was to a collection that does "
"not exist.</td></tr>\n"
"</table>\n",
(long)ENOMEM ,mstrerror(ENOMEM),
(long)ENOTFOUND,mstrerror(ENOTFOUND),
(long)ETRYAGAIN,mstrerror(ETRYAGAIN),
(long)ENOCOLLREC,mstrerror(ENOCOLLREC));
sprintf ( p ,
""
"<br>\n"
"\n"
"<br><br>\n"
"\n"
"\n"
"\n"
"\n"
"<a name=deleting></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Deleting Documents"
"</td></tr></table>\n"
"<br>\n"
"You can delete documents from the index two ways:"
"<ul>\n"
"<li>Perhaps the most popular is to use the <a href=\"/admin/reindex\">Reindex URLs</a> tool which allows you to delete all documents that match a simple query. Furthermore, that tool allows you to assign rulesets to all the domains of all the matching documents. All documents that match the query will have their docids stored in a spider queue of a user-specified priority. The spider will have to be enabled for that priority queue for the deletion to take place. Deleting documents is very similar to adding documents."
"<br><br>\n"
"<li>To delete a single document you can use the <a href=\"/admin/inject\">Inject URL</a> page. Make sure that url injection is enabled on the <a href=/master>Master Controls</a> page."
"</ul>\n"
"<br><br>\n"
""
""
"\n"
"\n"
"\n"
);
p += gbstrlen ( p );
sprintf ( p ,
""
""
""
""
"<a name=\"scoring\"></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Scoring A Document</font></a>\n"
"</td></tr></table>\n"
"<br><br>\n"
"Gigablast scores word and phrases in a document based on the following criteria:"
"<br>\n"
"<ul>\n"
"<li>the <b>quality</b> of the document"
"<li>the <b>count</b> of the word or phrase in the document"
"<li>the <b>locations</b> of the word or phrase in the document"
"<li>the <b>spaminess</b> of the word or phrase in the document"
"<li>the <b>length</b> of the part of the document being indexed"
"</ul>\n"
"<br>\n"
"By assigning a <a href=\"#ruleset\">ruleset</a> to a document, you can control exactly how Gigablast uses these criteria to generate the score of a word or phrase in that document."
"<br><br>\n"
"<br>\n"
"<b>Index Rules</b></center>\n"
"<br>\n"
"When Gigablast indexes a document it first chains down the <a href=\"#indexblock\">&lt;index&gt; tags</a> that are listed in the ruleset in the order they are presented. Each of these &lt;index&gt; tags, and all that it contains, represents one <b><font color=red>index rule</font></b>. Each index rule describes how to index a portion of the document. Different portions of the document may be indexed and scored in different ways. The order of these index rules can be very important, since the same word's score accumulates from one index rule to the next, and different index rules may have different score ceilings for that word."
""
"<br><br>\n"
"In addition to describing the various sub tags of an index rule in the <a href=\"#indexingsection\">sample ruleset file</a>, they are further described in the following table:"
""
"<br><br><br>\n"
"<b>Sub Tags of an Index Rule</b></center>\n"
"<br>\n"
"<table cellpadding=4>\n"
"<tr>\n"
"<td bgcolor=#eeeeee>\n"
"<a ref=\"#indexname\">&lt;name&gt;</a>X&lt;/&gt;"
"</td>\n"
"<td bgcolor=#eeeeee>This tag tells Gigablast what part of the document to index. You can have multiple &lt;name&gt; tags in the same index rule. <i>X</i> can be one of the following values:<br><br>\n"
""
"<table cellpadding=1>\n"
"<tr><td><i>ABSENT</i></td><td>If the &lt;name&gt; tag is not present Gigablast will restrict itself to all words and phrases in the document that are not in between a &lt; and &gt; of any tag in the document.</td></tr>\n"
"<tr><td>title</td><td>Tells Gigablast to index the words and phrases in between the first pair of &lt;title&gt; and &lt;/title&gt; tags present in the document, if one exists.</td></tr>\n"
"<tr><td>meta.keywords</td><td>Tells Gigablast to index the words and phrases contained in the content field of the &lt;meta name=keywords content=X&gt; tag.</td></tr>"
"<tr><td>meta.keyword</td><td> See above</td></tr>"
"<tr><td>meta.summary</td><td> See above</td></tr>"
"<tr><td>meta.description</td><td> See above</td></tr>"
"<tr><td>meta.foobar</td><td>COMING SOON (user-defined meta tags)</td></tr>"
"<tr><td>foobar</td><td>You can add your own tag, like <i>&lt;foobar&gt;index this&lt;/&gt;</i> to the document and then have an index rule with a &lt;name&gt;foobar&lt;/&gt; that contains rules on how to index it.</td></tr>\n"
"</table>\n"
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td>\n"
"&lt;prefix&gt;X&lt;/&gt;"
"</td>\n"
"<td>\n"
"If present, Gigablast will index the words and phrases with the specified prefix, <i>X</i>. Fielded searches can then be performed. Example: &lt;prefix&gt;title&lt;/&gt;"
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td bgcolor=#eeeeee>\n"
"&lt;maxQualityForSpamDetect&gt;X&lt;/&gt;"
"</td>\n"
"<td bgcolor=#eeeeee>\n"
"Spam detection will be performed on the words and phrases if the document's quality is <i>X</i> or lower. Spam detection generally lowers the scores of repeated words and phrases based on the degree of repetition."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td>\n"
"&lt;minQualityToIndex&gt;X&lt;/&gt;"
"</td>\n"
"<td>\n"
"If the document's quality is below <i>X</i>, then do not index the words and phrases for this index rule."
"</td>\n"
"</tr>\n"
""
""
"<tr>\n"
"<td bgcolor=#eeeeee>\n"
"&lt;filterHtmlEntities&gt;X&lt;/&gt;"
"</td>\n"
"<td bgcolor=#eeeeee>\n"
"If <i>X</i> is <i>yes</i> then convert HTML entities, like &AMP;gt;, into their represented characters before indexing."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td>\n"
"&lt;indexIfUniqueOnly&gt;X&lt;/&gt;"
"</td>\n"
"<td>\n"
"If <i>X</i> is <i>yes</i> then each word or phrase will only be indexed if not already indexed by a previous index rule in the ruleset, and only the first occurence of the word or phrase will be indexed, subsequent occurences will not count towards the score."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td bgcolor=#eeeeee>\n"
"&lt;indexSingletons&gt;X&lt;/&gt;"
"</td>\n"
"<td bgcolor=#eeeeee>\n"
"If <i>X</i> is <i>yes</i> then index the words, otherwise do not."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td>\n"
"&lt;indexPhrases&gt;X&lt;/&gt;"
"</td>\n"
"<td>\n"
"If <i>X</i> is <i>yes</i> then index the phrases, otherwise do not."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td bgcolor=#eeeeee>\n"
"&lt;indexAsWhole&gt;X&lt;/&gt;"
"</td>\n"
"<td bgcolor=#eeeeee>\n"
"If <i>X</i> is <i>yes</i> then index the whole sequence of indexable words as a checksum."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td>\n"
"&lt;useStopWords&gt;X&lt;/&gt;"
"</td>\n"
"<td>\n"
"If <i>X</i> is <i>yes</i> then use <a href=\"#stopwords\">stop words</a> when forming phrases."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td bgcolor=#eeeeee>\n"
"&lt;useStems&gt;X&lt;/&gt;"
"</td>\n"
"<td bgcolor=#eeeeee>\n"
"If <i>X</i> is <i>yes</i> then index stems. Currently unsupported."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td>\n"
""
"&lt;quality11&gt; X1 &lt;/&gt;<br>\n"
"&lt;quality12&gt; X2 &lt;/&gt;...<br>\n"
"&lt;quality1N&gt; XN &lt;/&gt;<br>\n"
"&lt;maxLen11&gt; Y1 &lt;/&gt;<br>\n"
"&lt;maxLen12&gt; Y2 &lt;/&gt;...<br>\n"
"&lt;maxLen1N&gt; YN &lt;/&gt;<br>\n"
""
"</td>\n"
"<td>\n"
"This maps the quality of the document to a maximum number of CHARACTERS to index. <a name=\"mapdesc\"></a><b>The (Xn,Yn) points form a piecewise function which is linearly interpolated between points. The edges are horizontal, meaning, if X is 0 Y will be Y1, or if X is infinite, Y will be YN.</b>\n"
"</td>\n"
"</tr>\n"
""
""
"<tr>\n"
"<td bgcolor=#eeeeee>\n"
"<a name=\"v1\"></a>\n"
"&lt;quality21&gt; X1 &lt;/&gt;<br>\n"
"&lt;quality22&gt; X2 &lt;/&gt;...<br>\n"
"&lt;quality2N&gt; XN &lt;/&gt;<br>\n"
"&lt;maxScore21&gt; Y1 &lt;/&gt;<br>\n"
"&lt;maxScore22&gt; Y2 &lt;/&gt;...<br>\n"
"&lt;maxScore2N&gt; YN &lt;/&gt;<br>\n"
""
"</td>\n"
"<td bgcolor=#eeeeee>\n"
"This maps the quality of the document to a percentage of the absolute max score a word or phrase can have. This is the QUALITY_WEIGHT_MAX value in the <a href=\"#formula\">formula</a>."
"</td>\n"
"</tr>\n"
""
""
"<tr>\n"
"<td>\n"
"<a name=\"v2\"></a>\n"
"&lt;quality31&gt; X1 &lt;/&gt;<br>\n"
"&lt;quality32&gt; X2 &lt;/&gt;...<br>\n"
"&lt;quality3N&gt; XN &lt;/&gt;<br>\n"
"&lt;scoreWeight31&gt; Y1 &lt;/&gt;<br>\n"
"&lt;scoreWeight32&gt; Y2 &lt;/&gt;...<br>\n"
"&lt;scoreWeight3N&gt; YN &lt;/&gt;<br>\n"
""
"</td>\n"
"<td>\n"
"This maps the quality of the document to a percentage weight on the base score of the words and phrases being indexed. This is the QUALITY_WEIGHT value in the <a href=\"#formula\">formula</a>."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td bgcolor=#eeeeee>\n"
"<a name=\"v3\"></a>\n"
"&lt;len41&gt; X1 &lt;/&gt;<br>\n"
"&lt;len42&gt; X2 &lt;/&gt;...<br>\n"
"&lt;len4N&gt; XN &lt;/&gt;<br>\n"
"&lt;scoreWeight41&gt; Y1 &lt;/&gt;<br>\n"
"&lt;scoreWeight42&gt; Y2 &lt;/&gt;...<br>\n"
"&lt;scoreWeight4N&gt; YN &lt;/&gt;<br>\n"
""
"</td>\n"
"<td bgcolor=#eeeeee>\n"
"This maps the length (in characters) of the what is being indexed to a percentage weight on the base score of the words and phrases being indexed. This is the LENGTH_WEIGHT value in the <a href=\"#formula\">formula</a>."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td>\n"
"<a name=\"v4\"></a>\n"
"&lt;len51&gt; X1 &lt;/&gt;<br>\n"
"&lt;len52&gt; X2 &lt;/&gt;...<br>\n"
"&lt;len5N&gt; XN &lt;/&gt;<br>\n"
"&lt;maxScore51&gt; Y1 &lt;/&gt;<br>\n"
"&lt;maxScore52&gt; Y2 &lt;/&gt;...<br>\n"
"&lt;maxScore5N&gt; YN &lt;/&gt;<br>\n"
"</td>\n"
"<td>\n"
"This maps the length (in characters) of the what is being indexed to a percentage of the absolute maximum score a word or phrase can have. This is the LENGTH_WEIGHT_MAX value in the <a href=\"#formula\">formula</a>."
"</td>\n"
"</tr>\n"
"</table>\n"
""
"<a name=\"formula\"></>\n"
"<br><br>\n"
"<b>Computing the Score</b>\n"
"<br><br>\n"
"Each word in the document is assigned a base score with this formula : "
"<br>\n"
"<pre>\n"
"BASE_SCORE = min { (256 * QUALITY_WEIGHT * LENGTH_WEIGHT ) / 10000 + BOOST ,\n"
" (0xffffffffLL * QUALITY_WEIGHT_MAX * LENGTH_WEIGHT_MAX ) / 10000 }\n"
"</pre>\n"
"<br>\n"
"See above table for descriptions of these variables. The BOOST is 256 if the page links to gigablast.com or has a submission for to gigablast.com, but if not, the BOOST is 0."
""
"<br>\n"
"<br>\n"
"After the base score is computed, it is multiplied by the number of occurences of the word or phrase in the portion of the document being indexed as specified by the index rule. This score may then be reduced if spam detection occurred and the word or phrase was deemed repetitious. Spam detection is triggered when the quality of the document is at or below the value specified in the &lt;minQualityForSpamDetect&gt; tag in the index rule. Finally, the score is mapped into an 8 bit value, from 1 to 255, and stored in the index."
"<br><br>\n"
"To see the scoring algorithm in action you can use the <b><a href=\"/master/parser\">Parser Tool</a></b>. It will show each indexed word and phrase and its associated score, as well as some attributes associated with the indexed document."
""
"<br>\n"
"<br>\n"
"<br>\n"
"<b>Indexing Document Attributes</b>\n"
"<br>\n"
"<br>\n"
"Attributes which describe the document are often indexed but are deemed too simple to require their own index rule. This includes indexing portions of the url in various ways and indexing the content type of the document, as the table below illustrates."
"<br><br>\n"
"<table cellpadding=4>\n"
"<tr>\n"
"<td><b>Item</b></td>\n"
"<td><b>Ruleset Tag</b></td>\n"
"<td><b>Desription</b></td>\n"
"</tr>\n"
""
"<td bgcolor=#eeeeee>\n"
"&lt;meta name=foo content=bar&gt;"
"</td>\n"
"<td bgcolor=#eeeeee>\n"
"--"
"</td>\n"
"<td bgcolor=#eeeeee>\n"
"User-defined meta tags use the quality of the document multiplied by 256 as their score. If this product is 0 it is upped to 1. This score is then mapped to an 8-bit final score an indexed. Furthermore, when indexing user-defined meta tags, only one occurence of each word or phrase is counted. In the future, these meta tags may have their own index rule."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td>\n"
"http://www.xxx.com/abc"
"</td>\n"
"<td>\n"
"&lt;indexUrl&gt;X&lt;/&gt;"
"</td>\n"
"<td>If X is <i>yes</i> then the entire url is indexed as one word with a BASE_SCORE of 1 and with a url: prefix so a search for <i>url:http://www.xxx.com/</i> will bring up the document."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td bgcolor=#eeeeee>\n"
"http://www.xxx.com/abc"
"</td>\n"
"<td bgcolor=#eeeeee>\n"
"&lt;indexSubUrl&gt;X&lt;/&gt;"
"</td>\n"
"<td bgcolor=#eeeeee>If X is <i>yes</i> then the url is indexed as if it occured in the document, but with a random BASE_SCORE (based on url hash) and a suburl: prefix so a search for <i>suburl:\"com/abc\"</i> will bring up the document."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td>\n"
"http://www.xxx.com/abc"
"</td>\n"
"<td>\n"
"&lt;indexIp&gt;X&lt;/&gt;"
"</td>\n"
"<td>If X is <i>yes</i> then the IP of the url will be indexed as if it were one word but with a random BASE_SCORE (based on url hash). Furthermore, the last number of the IP address is replaced with a zero and that IP address is indexed in order to provide an IP domain search ability. So if a url has the IP address 1.2.3.4 then a search for ip:1.2.3.4 or for ip:1.2.3 should bring it up."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td bgcolor=#eeeeee>\n"
"http://www.xxx.com/abc?q=hi"
"</td>\n"
"<td bgcolor=#eeeeee>\n"
"&lt;indexSite&gt;X&lt;/&gt;"
"</td>\n"
"<td bgcolor=#eeeeee>If X is <i>yes</i> then the following terms would be indexed with a base score of BASE_SCORE (but multiplied by 3 if the url is a root url): "
"<ul>\n"
"<li>site:www.xxx.com/abc?q=hi"
"<li>site:www.xxx.com/abc?"
"<li>site:www.xxx.com/"
"<li>site:xxx.com/abc?q=hi"
"<li>site:xxx.com/abc?"
"<li>site:xxx.com/"
"</ul>\n"
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td bgcolor=#eeeeee>\n"
"http://www.xxx.com/form.php"
"</td>\n"
"<td bgcolor=#eeeeee>\n"
"&lt;indexExt&gt;X&lt;/&gt;"
"</td>\n"
"<td bgcolor=#eeeeee>If X is <i>yes</i> then the file extension, if any, of the url would be indexed with the ext: prefix and a score of BASE_SCORE. So a query of ext:php would bring up the document in this example case."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td>\n"
"links"
"</td>\n"
"<td>\n"
"&lt;indexLinks&gt;X&lt;/&gt;"
"</td>\n"
"<td>If X is <i>yes</i> then the various links in the document will be indexed with a link: prefix. Scores are special in this case."
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td bgcolor=#eeeeee>\n"
"collection name"
"</td>\n"
"<td bgcolor=#eeeeee>\n"
"--"
"</td>\n"
"<td bgcolor=#eeeeee>The collection name of the document is indexed with the coll: prefix and a BASE_SCORE of 1. "
"</td>\n"
"</tr>\n"
""
"<tr>\n"
"<td>\n"
"content type"
"</td>\n"
"<td>\n"
"--"
"</td>\n"
"<td>The content type of the document is indexed with the type: (or filetype:) prefix and a BASE_SCORE of 1. If the content type is not one of these supported content types, then nothing will be indexed: "
"<ul>\n"
"<li>html"
"<li>text"
"<li>xml"
"<li>pdf"
"<li>doc"
"<li>xls"
"<li>ppt"
"<li>ps"
"</ul>\n"
"</td>\n"
"</tr>\n"
"</table>\n"
"<br>\n"
"<br>\n"
""
""
""
""
""
""
"<a name=metas></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Indexing User-Defined Meta Tags"
"</td></tr></table>\n"
"<br>\n"
"Gigablast supports the indexing, searching and displaying of user-defined meta tags. For instance, if you have a tag like <i>&lt;meta name=\"foo\" content=\"bar baz\"&gt;</i> in your document, then you will be able to do a search like <i><a href=\"/search?q=foo%%3Abar&dt=foo\">foo:bar</a></i> or <i><a href=\"/search?q=foo%%3A%%22bar+baz%%22&dt=foo\">foo:\"bar baz\"</a></i> and Gigablast will find your document. "
"<br><br>\n"
"You can tell Gigablast to display the contents of arbitrary meta tags in the search results, like <a href=\"/search?q=gigablast&s=10&dt=author+keywords%%3A32\">this</a>. Note that you must assign the <i>dt</i> cgi parameter to a space-separated list of the names of the meta tags you want to display. You can limit the number of returned characters of each tag to X characters by appending a <i>:X</i> to the name of the meta tag supplied to the <i>dt</i> parameter. In the link above, I limited the displayed keywords to 32 characters. The content of the meta tags is also provided in the &lt;display&gt; tags in the <a href=\"#output\">XML feed</a>\n"
"<br><br>\n"
"Gigablast will index the content of all meta tags in this manner. Meta tags with the same <i>name</i> parameter as other meta tags in the same document will be indexed as well.\n"
"<br><br>\n"
"Why use user-defined metas? Because it is very powerful. It allows you to embed custom data in your documents, search for it and retrieve it."
"<br>\n"
"<br>\n"
"You can also explicitly specify how to index certain meta tags by making an &lt;index&gt; tag in the <a href=\"#ruleset\">ruleset</a> as shown <a href=\"#rsmetas\">here</a>. The specified meta tags will be indexed in the user-defined meta tag fashion as described above, in addition to any method described in the ruleset."
"<br>\n"
"<br>\n"
"<br>\n"
""
""
""
""
""
"<a name=bigdocs></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Indexing Big Documents"
"</td></tr></table>\n"
"<br>\n"
"When indexing a document you will be bound by the available memory of the machine that is doing the indexing. A document that is dense in words can takes as much as ten times the memory as the size of the document in order to process it for indexing. Therefore you need to make sure that the amount of available memory is adequate to process the document you want to index. You can turn off Spam detection to reduce the processing overhead by a little bit."
"<br>\n"
"<br>\n"
"The <b>&lt;maxMem&gt;</b> tag in the <a href=#config>gb.conf</a> file controls the maximum amount of memory that the whole Gigablast process can use. HOWEVER, this memory is shared by databases, thread stacks, protocol stacks and other things that may or may not use most of it. Probably, the best way to see much memory is available to the Gigablast process for processing a big document is to look at the <b>Stats Page</b>. It shows you exactly how much memory is being used at the time you look at it. Hit refresh to see it change."
"<br>\n"
"<br>\n"
"You can also check all the tags in the gb.conf file that have the word \"mem\" in them to see where memory is being allocated. In addition, you will need to check the first 100 lines of the log file for the gigablast process to see how much memory is being used for thread and protocol stacks. These should be displayed on the Stats page, but are currently not."
"<br>\n"
"<br>\n"
"After ensuring you have enough extra memory to handle the document size, you will need to make sure the document fits into the tree that is used to hold the documents in memory before they get dumped to disk. The documents are compressed using zlib before being added to the tree so you might expect a 5:1 compression for a typical web page. The memory used to hold document in this tree is controllable from the <b>&lt;titledbMaxTreeMem&gt;</b> parameter in the gb.conf file. Make sure that is big enough to hold the document you would like to add. If the tree could accomodate the big document, but at the time is partially full, Gigablast will automatically dump the tree to disk and keep trying to add the big document."
"<br>\n"
"<br>\n"
"Finally, you need to ensure that the <b>max text doc len</b> and <b>max other doc len</b> controls on the <b>Spider Controls</b> page are set to accomodating sizes. Use -1 to indicate no maximum. <i>Other</i> documents are non-text and non-html documents, like PDF, for example. These controls will physically prohibit the spider from downloading more than this many bytes. This causes excessively long documents to be truncated. If the spider is downloading a PDF that gets truncated then it abandons it, because truncated PDFs are useless."
"<br>\n"
"<br>\n"
"<br>\n"
"<a name=langs></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Indexing Different Languages"
"</td></tr></table>\n"
"<br>\n"
"Gigablast currently just supports indexing the ISO-8859-1 (aka Latin-1) character set. This character set uses one byte (8 bits) per character. It covers most West European languages such as French, Spanish, Catalan, Galician, Basque, Portuguese, Italian, Albanian, Afrikaans, Dutch, German, Danish, Swedish, Norwegian, Finnish, Faroese, Icelandic, Irish, Scottish and English.<br><br>"
"Gigablast has a switch in the Spider Controls for enabling and disabling "
"the indexing of Asian character sets. If the spider is downloading a "
"document and Asian character sets are disallowed, then it will check for "
"the Content-Encoding field in the mime of the HTTP reply from the web "
"server. If the name of the character set is one of the Asian character "
"sets that Gigablast recognizes, then the document will NOT be indexed. "
"This control covers some of the more popular "
"characters sets in Asia, but if the character set is not recognized by "
"Gigablast it will be indexed as if it were the Latin-1 character set. "
"Likewise, all search queries are interpreted as belonging to the Latin-1 "
"character set."
"<br><br>"
"If Gigablast indexes a document as being from the Latin-1 character set "
"when in fact it is not, then Gigablast will parse out words as being "
"sequences of alpha-numeric characters. All other characters are considered "
"to be punctuation. The alpha-numeric characters are defined as a-z, A-Z, "
"0-9 and all of the letters with accent marks (192-255, except 215 and 247). "
"Everything else is considered a punctuation character. "
"<br><br>"
"Gigablast also has a #define that controls whether or not it will "
"convert accented letters into their unaccented equivalents. When accent "
"conversion is active, a word like <i>r&egrave;sume</i> is indexed "
"the same as the word <i>resume</i>. Accent conversion is on by default."
"<br><br>"
"In the future Gigablast may support many different character sets. "
"This involves translating the text of a particular character set into "
"Unicode, however, before this translation can be performed, the character "
"set must be recognized by Gigablast. In most cases the web servers and the "
"web browsers do a good job of providing the character set name in the HTTP "
"reply or HTTP request by supplying a Content-Encoding field in the "
"mime, however, when this is not supplied, Gigablast must make a best effort "
"to auto-detect it. Once the character set is determined, and the translation "
"to Unicode has been performed, the content can be parsed into \"words\", "
" basic units of meaning, inherent to that particular language."
"<br><br>"
"The following table describes the Latin-1 character set:<br>"
"<pre>"
"<B>Char Decimal Hex Entity Char Decimal Hex Entity\n"
" Reference Reference</b>\n"
"NUL 0 0 SOH 1 1\n"
"STX 2 2 ETX 3 3\n"
"EOT 4 4 ENQ 5 5\n"
"ACK 6 6 BEL 7 7\n"
"BS 8 8 HT 9 9\n"
"NL 10 a VT 11 b\n"
"NP 12 c CR 13 d\n"
"SO 14 e SI 15 f\n"
"DLE 16 10 DC1 17 11\n"
"DC2 18 12 DC3 19 13\
DC4 20 14 NAK 21 15\
SYN 22 16 ETB 23 17\
CAN 24 18 EM 25 19\
SUB 26 1a ESC 27 1b\
FS 28 1c GS 29 1d\
RS 30 1e US 31 1f\
SP 32 20 ! 33 21\
\" 34 22 &amp;quot; # 35 23\
$ 36 24 %% 37 25\
&amp; 38 26 &amp;amp; ' 39 27\
( 40 28 ) 41 29\
* 42 2a + 43 2b\
, 44 2c - 45 2d\
. 46 2e / 47 2f\
0 48 30 1 49 31\
2 50 32 3 51 33\
4 52 34 5 53 35\
6 54 36 7 55 37\
8 56 38 9 57 39\
: 58 3a ; 59 3b\
&lt; 60 3c &amp;lt; = 61 3d\
&gt; 62 3e &amp;gt; ? 63 3f\
@ 64 40 A 65 41\
B 66 42 C 67 43\
D 68 44 E 69 45\
F 70 46 G 71 47\
H 72 48 I 73 49\
J 74 4a K 75 4b\
L 76 4c M 77 4d\
N 78 4e O 79 4f\
P 80 50 Q 81 51\
R 82 52 S 83 53\
T 84 54 U 85 55\
V 86 56 W 87 57\
X 88 58 Y 89 59\
Z 90 5a [ 91 5b\
\\ 92 5c ] 93 5d\
^ 94 5e _ 95 5f\
` 96 60 a 97 61\
b 98 62 c 99 63\
d 100 64 e 101 65\
f 102 66 g 103 67\
h 104 68 i 105 69\
j 106 6a k 107 6b\
l 108 6c m 109 6d\
n 110 6e o 111 6f\
p 112 70 q 113 71\
r 114 72 s 115 73\
t 116 74 u 117 75\
v 118 76 w 119 77\
x 120 78 y 121 79\
z 122 7a { 123 7b\
| 124 7c } 125 7d\
~ 126 7e DEL 127 7f\
-- 128 80 -- 129 81\
-- 130 82 -- 131 83\
-- 132 84 -- 133 85\
-- 134 86 -- 135 87\
-- 136 88 -- 137 89\
-- 138 8a -- 139 8b\
-- 140 8c -- 141 8d\
-- 142 8e -- 143 8f\
-- 144 90 -- 145 91\
-- 146 92 -- 147 93\
-- 148 94 -- 149 95\
-- 150 96 -- 151 97\
-- 152 98 -- 153 99\
-- 154 9a -- 155 9b\
-- 156 9c -- 157 9d\
-- 158 9e -- 159 9f\
  160 a0 <em>&amp;nbsp;</em> ¡ 161 a1 <em>&amp;iexcl;</em>\
¢ 162 a2 <em>&amp;cent;</em> £ 163 a3 <em>&amp;pound;</em>\
¤ 164 a4 <em>&amp;curren;</em> ¥ 165 a5 <em>&amp;yen;</em>\
¦ 166 a6 <em>&amp;brvbar;</em> § 167 a7 <em>&amp;sect;</em>\
¨ 168 a8 <em>&amp;uml;</em> © 169 a9 <em>&amp;copy;</em>\
ª 170 aa <em>&amp;ordf;</em> « 171 ab <em>&amp;laquo;</em>\
¬ 172 ac <em>&amp;not;</em> ­ 173 ad <em>&amp;shy;</em>\
® 174 ae <em>&amp;reg;</em> ¯ 175 af <em>&amp;macr;</em>\
° 176 b0 <em>&amp;deg;</em> ± 177 b1 <em>&amp;plusmn;</em>\
² 178 b2 <em>&amp;sup2;</em> ³ 179 b3 <em>&amp;sup3;</em>\
´ 180 b4 <em>&amp;acute;</em> µ 181 b5 <em>&amp;micro;</em>\
¶ 182 b6 <em>&amp;para;</em> · 183 b7 <em>&amp;middot;</em>\
¸ 184 b8 <em>&amp;cedil;</em> ¹ 185 b9 <em>&amp;sup1;</em>\
º 186 ba <em>&amp;ordm;</em> » 187 bb <em>&amp;raquo;</em>\
¼ 188 bc <em>&amp;frac14;</em> ½ 189 bd <em>&amp;frac12;</em>\
¾ 190 be <em>&amp;frac34;</em> ¿ 191 bf <em>&amp;iquest;</em>\
À 192 c0 &amp;Agrave; Á 193 c1 &amp;Aacute;\
Â 194 c2 &amp;Acirc; Ã 195 c3 &amp;Atilde;\
Ä 196 c4 &amp;Auml; Å 197 c5 &amp;Aring;\
Æ 198 c6 &amp;AElig; Ç 199 c7 &amp;Ccedil;\
È 200 c8 &amp;Egrave; É 201 c9 &amp;Eacute;\
Ê 202 ca &amp;Ecirc; Ë 203 cb &amp;Euml;\
Ì 204 cc &amp;Igrave; Í 205 cd &amp;Iacute;\
Î 206 ce &amp;Icirc; Ï 207 cf &amp;Iuml;\
Ð 208 d0 <em>&amp;ETH;</em> Ñ 209 d1 &amp;Ntilde;\
Ò 210 d2 &amp;Ograve; Ó 211 d3 &amp;Oacute;\
Ô 212 d4 &amp;Ocirc; Õ 213 d5 &amp;Otilde;\
Ö 214 d6 &amp;Ouml; × 215 d7 <em>&amp;times;</em>\
Ø 216 d8 &amp;Oslash; Ù 217 d9 &amp;Ugrave;\
Ú 218 da &amp;Uacute; Û 219 db &amp;Ucirc;\
Ü 220 dc &amp;Uuml; Ý 221 dd &amp;Yacute;\
Þ 222 de &amp;THORN; ß 223 df &amp;szlig;\
à 224 e0 &amp;agrave; á 225 e1 &amp;aacute;\
â 226 e2 &amp;acirc; ã 227 e3 &amp;atilde;\
ä 228 e4 &amp;auml; å 229 e5 &amp;aring;\
æ 230 e6 &amp;aelig; ç 231 e7 &amp;ccedil;\
è 232 e8 &amp;egrave; é 233 e9 &amp;eacute;\
ê 234 ea &amp;ecirc; ë 235 eb &amp;euml;\
ì 236 ec &amp;igrave; í 237 ed &amp;iacute;\
î 238 ee &amp;icirc; ï 239 ef &amp;iuml;\
ð 240 f0 &amp;eth; ñ 241 f1 &amp;ntilde;\
ò 242 f2 &amp;ograve; ó 243 f3 &amp;oacute;\
ô 244 f4 &amp;ocirc; õ 245 f5 &amp;otilde;\
ö 246 f6 &amp;ouml; ÷ 247 f7 <em>&amp;divide;</em>\
ø 248 f8 &amp;oslash; ù 249 f9 &amp;ugrave;\
ú 250 fa &amp;uacute; û 251 fb &amp;ucirc;\
ü 252 fc &amp;uuml; ý 253 fd &amp;yacute;\
þ 254 fe &amp;thorn; ÿ 255 ff &amp;yuml;\
</pre>"
"<br>\n"
"<br>\n"
"<br>\n"
"<a name=rolling></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Rolling the New Index"
"</td></tr></table>\n"
"<br>\n"
"Just because you have indexed a lot of pages does not mean those pages are being searched. If the <b>restrict indexdb for queries</b> switch on the <a href=\"/admin/spider\">Spider Controls</a> page is on for your collection then any query you do may not be searching some of the more recently indexed data. You have two options:"
"<br><br>\n"
"<b>1.</b>You can turn this switch off which will tell Gigablast to search all the files in the index which will give you a realtime search, but, if &lt;indexdbMinFilesToMerge&gt; is set to <i>X</i> in the <a href=#config>gb.conf</a> file, then Gigablast may have to search X files for every query term. So if X is 40 this can destroy your performance. But high X values are indeed useful for speeding up the build time. Typically, I set X to 4 on gigablast.com, but for doing initial builds I will set it to 40."
"<br><br>\n"
"<b>2.</b>The second option you have for making the newer data searchable is to do a <i>tight merge</i> of indexdb. This tells Gigablast to combine the X files into one. Tight merges typically take about 2-4 minutes for every gigabyte of data that is merged. So if all of your indexdb* files are about 50 gigabytes, plan on waiting about 150 minutes for the merge to complete."
"<br><br>\n"
"<b>IMPORTANT</b>: Before you do the tight merge you should do a <b>disk dump</b> which tells Gigablast to dump all data in memory to disk so that it can be merged. In this way you ensure your final merged file will contain *all* your data. You may have to wait a while for the disk dump to complete because it may have to do some merging right after the dump to keep the number of files below &lt;indexdbMinFilesToMerge&gt;."
""
"<br><br>\n"
"Now if you are <a href=#input>interfacing to Gigablast</a> from another program you can use the <b>&rt=[0|1]</b> real time search cgi parameter. If you set this to 0 then Gigablast will only search the first file in the index, otherwise it will search all files."
"<br><br>\n"
""
""
""
""
""
"<a name=catdb></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Building a DMOZ Based Directory"
"</td></tr></table>\n"
"<br>\n"
"<b>How Catdb Works:</b><br>"
"<ul><li>Catdb is used to create a Web Directory based"
" on DMOZ (www.dmoz.org). The actual RDB known as"
" <i>catdb</i> is a set of records containing the urls"
" in the directory and which categories they belong to."
" Catdb is only required at spider time so that the"
" url being spidered can be checked for category"
" information. Generating Catdb requires the"
" gbdmoz.content.dat file generated by the"
" <i>dmozparse</i> program, which in turn requires the"
" DMOZ RDF files content.rdf.u8 and structure.rdf.u8"
" which can be found at http://rdf.dmoz.org/rdf"
" (see below)."
" Sites will have their directory information stored"
" in their TitleRecs to be retrieved at query time."
" <br>"
" <li>To use the Web Directory after it has been"
" created and spidered requires only the Categories"
" Hierarchy. The Hierarchy loads at startup using the"
" gbdmoz.structure.dat file, also created by"
" <i>dmozparse</i>. The Hierarchy is used to lookup"
" the directory hierarchy information while the"
" directory is being browsed by a user. Directory"
" results are generated using special terms (see below)"
" <li>All files used by Catdb and the Hierarchy,"
" including the RDB data itself, are kept in the"
" <i>cat</i> directory located under the root gigablast"
" directory.<br>"
" <li>To browse an active Directory, append Top"
" to the server address in the browser as though it is"
" a folder: http://www.gigablast.com/Top."
" Alternatively, append a known Category:"
" http://www.gigablast.com/Arts/Movies.<br>"
"</ul><br>"
"<b>Before You Get Started:</b><br>"
"<ul><li>Create the <i>dmozparse</i> program.<br>"
" <b>$ make dmozparse</b><br>"
" <li>Copy <i>dmozparse</i> to the <i>cat/</i>"
" directory under the main gigablast directory on host"
" 0.<br>"
"</ul><br>"
"<b>Generating a New Catdb:</b>"
"<ul><li>Download the latest content.rdf.u8 and "
" structure.rdf.u8 files from http://rdf.dmoz.org/rdf"
" into the <i>cat/</i> directory on host 0.<br>"
" <b>$ wget http://rdf.dmoz.org/rdf/"
"content.rdf.u8.gz<br>"
" $ gunzip content.rdf.u8.gz<br>"
" $ wget http://rdf.dmoz.org/rdf/"
"structure.rdf.u8.gz<br>"
" $ gunzip structure.rdf.u8.gz</b><br>"
" <li>Execute <i>dmozparse</i> in the <i>cat</i>"
" directory with the <i>new</i>"
" option to generate the catdb dat files.<br>"
" <b>$ dmozparse new</b><br>"
" <li>Execute the installcat script command on host"
" 0 to distribute the catdb files to all the hosts.<br>"
" <b>$ gb installcat</b><br>"
" <li>Make sure all spiders are stopped and"
" inactive.<br>"
" <li>Goto <i>catdb</i> in the admin section of"
" Gigablast and click \"Generate Catdb.\"<br>"
" <li>Once the command returns, Catdb will be ready"
" for use and spidering.<br>"
"</ul><br>"
"<b>Spidering Urls For New Catdb:</b>"
"<ul><li>Execute <i>dmozparse</i> in the <i>cat</i>"
" directory with the <i>urldump -s</i> option to"
" create the gbdmoz.urls.txt.# files which contain all"
" the urls in DMOZ.<br>"
" <b>$ dmozparse urldump -s</b><br>"
" <li>Move the gbdmoz.urls.txt.# files to the"
" <i>html</i> directory under the main Gigablast"
" directory of host 0.<br>"
" <li>Go to \"add url\" under the admin section"
" of Gigablast.<br>"
" <li><b>IMPORTANT:</b> Uncheck the strip session"
" ids option.<br>"
" <li>In the \"url of a file of urls to add\" box,"
" insert the hostname/ip and http port of host 0"
" followed by one of the gbdmoz.urls.txt.# files."
" Example: http://10.0.0.1:8000/gbdmoz.urls.txt.0<br>"
" <li>Press the \"add file\" button and allow the"
" urls to be added to the spider.<br>"
" <li>Repeat for all the gbdmoz.urls.txt.# files."
"<br>"
"</ul><br>"
"<b>Updating an Existing Catdb With New DMOZ Data:</b>"
"<ul><li>Download the latest content.rdf.u8 and "
" structure.rdf.u8 files from http://rdf.dmoz.org/rdf"
" into the <i>cat/</i> directory on host 0 with the"
" added extension \".new\".<br>"
" <b>$ wget http://rdf.dmoz.org/rdf/"
"content.rdf.u8.gz -O content.rdf.u8.new.gz<br>"
" $ gunzip content.rdf.u8.new.gz<br>"
" $ wget http://rdf.dmoz.org/rdf/"
"structure.rdf.u8.gz -O structure.rdf.u8.new.gz<br>"
" $ gunzip structure.rdf.u8.new.gz</b><br>"
" <li>Execute <i>dmozparse</i> in the <i>cat</i>"
" directory with the <i>update</i>"
" option to generate the catdb dat.new and diff files."
"<br>"
" <b>$ dmozparse update</b><br>"
" <li><b>NOTE:</b> If you wish to spider the new,"
" changed, and removed urls from this update, execute"
" <i>dmozparse</i> with the <i>diffurldump -s</i>"
" option to generate the gbdmoz.diffurls.txt file (See"
" below).<br>"
" <b>$ dmozparse diffurldump -s</b><br>"
" <li>Execute the installnewcat script command on"
" host"
" 0 to distribute the catdb files to all the hosts.<br>"
" <b>$ gb installnewcat</b><br>"
" <li>Make sure all spiders are stopped and"
" inactive.<br>"
" <li>Goto \"catdb\" in the admin section of"
" Gigablast and click \"Update Catdb.\"<br>"
" <li>Once the command returns, Catdb will be ready"
" for use and spidering.<br>"
"</ul><br>"
"<b>Spidering Urls For Updated Catdb:</b>"
"<ul><li>Execute <i>dmozparse</i> in the <i>cat</i>"
" directory with the <i>diffurldump -s</i> option to"
" create the gbdmoz.diffurls.txt.# files which contain"
" all the new, changed, or removed urls in DMOZ.<br>"
" <b>$ dmozparse diffurldump -s</b><br>"
" <li>Move the gbdmoz.diffurls.txt.# files to the"
" <i>html</i> directory under the main Gigablast"
" directory of host 0.<br>"
" <li>Go to \"add url\" under the admin section"
" of Gigablast.<br>"
" <li><b>IMPORTANT:</b> Uncheck the strip session"
" ids option.<br>"
" <li>In the \"url of a file of urls to add\" box,"
" insert the hostname/ip and http port of host 0"
" followed by one of the gbdmoz.diffurls.txt.# files."
" Example: http://10.0.0.1:8000/gbdmoz.diffurls.txt.0"
"<br>"
" <li>Press the \"add file\" button and allow the"
" urls to be added to the spider.<br>"
" <li>Repeat for all the gbdmoz.diffurls.txt.#"
" files."
"<br>"
"</ul><br>"
"<b>Deleting Catdb:</b>"
"<ul><li>Shutdown Gigablast.<br>"
" <li>Delete <i>catdb-saved.dat</i> and all"
" <i>cat/catdb*.dat</i> and <i>cat/catdb*.map</i>"
" files from all hosts.<br>"
" <li>Start Gigablast.<br>"
"</ul><br>"
"<b>Troubleshooting:</b>"
"<ul>"
"<li><b>Dmozparse prints an error saying it could"
" not open a file:</b><br>"
" Be sure you are running dmozparse in the"
" cat directory and that the steps above have been"
" followed correctly so that all the necessary files"
" have been downloaded or created.<br>"
" <li><b>Dmozparse prints an Out of Memory error:"
"</b><br>"
" Some modes of dmozparse can require several"
" hundred megabytes of system memory. Systems with"
" insufficient memory, under heavy load, or lacking"
" a correctly working swap may have problems running"
" dmozparse. Attempt to free up as much memory as"
" possible if this occcurs.<br>"
" <li><b>How to tell if pages are being added"
" with correct directory data:</b><br>"
" All pages with directory data are indexed"
" with special terms utilizing a prefix and sufix."
" The prefixes are listed below and represent a"
" specific feature under which the page was indexed."
" The sufix is always a numerical category ID. To"
" search for one of these terms, simply performa a"
" query with \"prefix:sufix\", i.e. \"gbpdcat:1\" will"
" list all pages under the Top category (or all pages"
" in the entire directory).<br>"
" <ul><li>gbdcat - The page is listed directly"
" under this base category.<br>"
" <li>gbpdcat - The page is listed under this"
" category or any child of this category.<br>"
" <li>gbicat - The page is listed indirectly"
" under this base category, meaning it is a page found"
" under a site listed in the base category.<br>"
" <li>gbpicat - The page is listed indirectly"
" under this category, meaning it is a page found under"
" a site listed under this category or any child of"
" this category.<br>"
" </ul>"
" <li><b>Pages are not being indexed with directory"
" data:</b><br>"
" First check to make sure that sites that are"
" actually in DMOZ are those being added by the"
" spiders. Next check to see if the sites return"
" category information when looked up under the Catdb"
" admin section. If they come back with directory"
" information, the site may just need to be respidered."
" If the lookup does not return category information"
" and all hosts are properly running, Catdb may need"
" to be rebuilt from scratch.<br>"
" <li><b>The Directory shows results but does not"
" show sub-category listings or a page error is"
" returned and no results are shown:</b><br>"
" Make sure the gbdmoz.structure.dat and"
" structure.rdf.u8 files are in the <i>cat</i>"
" directory on every host. Also be sure the current"
" dat files were built from the current rdf.u8 files."
" Check the log to see if Categories was properly"
" loaded from file at startup (grep log# Categories)."
"<br>"
"</ul><br>"
""
""
""
""
""
"<a name=logs></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>The Log System"
"</td></tr></table>\n"
"<br>\n"
"<table>\n"
"<tr>\n"
"<td>Gigablast uses its own format for logging messages, for example,<br>\n"
"<pre>\n"
"1091228736104 0 INIT Gigablast Version 1.234\n"
"1091228736104 0 INIT thread Allocated 435333 bytes for thread stacks.\n"
"1091228736104 0 WARN mem Failed to alloc 360000 bytes.\n"
"1091228736104 0 WARN query Failed to intersect lists. Out of memory.\n"
"1091228736104 0 WARN query Too many words. Query truncated.\n"
"1091228736104 0 INFO build GET http://hohum.com/foobar.html\n"
"1091228736104 0 INFO build http://hohum.com/foobar.html ip=4.5.6.7 : Success\n"
"1091228736104 0 DEBUG build Skipping xxx.com, would hammer IP.\n"
"</pre>\n"
"<br>\n"
"The first field, a large number, is the time in milliseconds since the epoch. This timestamp is useful for evaluating performance.<br>\n"
"<br>\n"
"The second field, a 0 in the above example, is the hostId (from <a href=\"#hosts\">hosts.conf</a>) of the host that logged the message.<br>\n"
"<br>\n"
"The third field, INIT in the first line of the above example, is the type of log message. It can be any of the following:<br>\n"
"<br>\n"
"<table>\n"
"<tr>\n"
"<td>INIT</td>\n"
"<td>Messages printed at the initilization or shutdown of the Gigablast process.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>WARN</td>\n"
"<td>Most messages fall under this category. These messages are usually due to an error condition, like out of memory.</td>\n"
"</tr>\n"
"<td>INFO</td>\n"
"<td>Messages that are given for information purposes only and not indicative of an error condition.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>LIMIT</td>\n"
"<td>Messages printed when a document was not indexed because the document quota specified in the ruleset was breeched. Also, urls that were truncated because they were too long. Or a robots.txt file was too big and was truncated.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>TIME</td>\n"
"<td>Timestamps, logged for benchmarking various processes.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>DEBUG</td>\n"
"<td>Messages used for debugging.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>LOGIC</td>\n"
"<td>Programmer sanity check messages. You should never see these, because they signify a problem with the code logic.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>REMND</td>\n"
"<td>A reminder to the programmer to do something.</td>\n"
"</tr>\n"
"</table>\n"
""
"<br>\n"
"The fourth field is the resource that is logging the message. The resource can be one of the following:"
""
"<table>\n"
"<tr>\n"
"<td>addurls</td>\n"
"<td>Messages related to adding urls. Urls could have been added by the spider or by a user via a web interface.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>admin</td>\n"
"<td>Messages related to administrative functions and tools like the query-reindex tool and the sync tool.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>build</td>\n"
"<td>Messages related to the indexing process.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>conf</td>\n"
"<td>Messages related to <a href=\"#hosts\">hosts.conf</a> or <a href=\"#config\">gb.conf</a>.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>disk</td>\n"
"<td>Messages related to reading or writing to the disk.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>dns</td>\n"
"<td>Messages related to talking with a dns server.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>http</td>\n"
"<td>Messages related to the HTTP server.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>loop</td>\n"
"<td>Messages related to the main loop that Gigablast uses to process incoming signals for network and file communication.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>merge</td>\n"
"<td>Messages related to performing file merges.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>net</td>\n"
"<td>Messages related to the network layer above the udp server. Includes the ping and redirect-on-dead functionality.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>query</td>\n"
"<td>Messages related to executing a query.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>db</td>\n"
"<td>Messages related to a database. Fairly high level.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>spcache</td>\n"
"<td>Messages related to the spider cache which is used to efficiently queue urls from disk.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>speller</td>\n"
"<td>Messages related to the query spell checker.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>thread</td>\n"
"<td>Messages related to the threads class.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>topics</td>\n"
"<td>Messages related to related topics generation.</td>\n"
"</tr>\n"
"<tr>\n"
"<td>udp</td>\n"
"<td>Messages related to the udp server.</td>\n"
"</tr>\n"
"</table>\n"
"<br>\n"
"Finally, the last field, is the message itself."
"<br><br>\n"
"You can turn many messages on and off by using the <a href=\"/master?submenu=1\">Log Controls</a>."
"<br><br>\n"
"The same parameters on the Log Controls page can be adjusted in the <a href=\"#configlog\">gb.conf</a> file."
"<br><br>\n"
"\n"
"\n"
"\n"
""
""
""
""
"<a name=optimizing></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Optimizing\n"
"</td></tr></table>\n"
"<br>\n"
"Gigablast is a fairly sophisticated database that has a few things you can tweak to increase query performance or indexing performance.\n"
"<br><br>\n"
"\n"
"<b>Query Optimizations:</b>\n"
"\n"
"<ul>\n"
"<li> Set <b>restrict indexdb for queries</b> on the \n"
"<a href=\"/admin/spider\">Spider Controls</a> page to YES.\n"
"This parameter can also be controlled on a per query basis using the \n"
"<a href=#rt><b>rt=X</b></a> cgi parm.\n"
"This will decrease freshness of results but typically use\n"
"3 or 4 times less disk seeks.\n"
"\n"
"<li> If you want to spider at the same time, then you should ensure\n"
"that the <b>max spider disk threads</b> parameter on the\n"
"<a href=\"/master\">Master Controls</a> page is set to around 1 \n"
"so the indexing/spidering processes do not hog the disk.\n"
"\n"
"<li> Set Gigablast to read-only mode to true to prevent Gigablast from using \n"
"memory to hold newly indexed data, so that this memory can \n"
"be used for caches. Just set the <b>&lt;readOnlyMode&gt;</b> parameter in your config file to 1.\n"
"\n"
"<li> Increase the indexdb cache size. The <b>&lt;indexdbMaxCacheMem&gt;</b> \n"
"parameter in\n"
"your config file is how many bytes Gigablast uses to store <i>index lists</i>.\n"
"Each word has an associated index list which is loaded from disk when that\n"
"word is part of a query. The more common the word, the bigger its index list.\n"
"By enabling a large indexdb cache you can save some fairly large disk reads.\n"
"\n"
"<li> Increase the clusterdb cache size. The <b>&lt;clusterdbMaxCacheMem&gt;</b>\n"
"parameter in\n"
"your config file is how many bytes Gigablast uses to store cluster records.\n"
"Cluster records are used for site clustering and duplicate removal. Every\n"
"URL in the index has a corresponding cluster record. When a url appears as a \n"
"search result its cluster record must be loaded from disk. Each cluster \n"
"record is about 12 to 16 bytes so by keeping these all in memory you can\n"
"save around 10 disk seeks every query.\n"
"\n"
"<li> Disable site clustering and dup removal. By specifying <i>&sc=0&dr=0</i>\n"
"in your query's URL you ensure that these two services are avoided and no\n"
"cluster records are loaded. You can also turn them off by default on the\n"
"<a href=\"/admin/spiderdb\">Spider Controls</a> page. But if someone explicitly\n"
"specifies <i>&sc=1</i> or <i>&dr=1</i> in their query URL then they will\n"
"override that switch.\n"
"\n"
"<li>If you are experiencing a high average query latency under a high query throughput then consider adding more twins to your architecture. If you do not have any twins, and are serving a large query volume, then data requests tend to clump up onto one particular server at random, slowing everybody else down. If that server has one or more twins available, then its load will be evened out through Gigablast's dynamic load balancing and the average query latency will decrease."
"\n"
"</ul>\n"
"\n"
"<br>\n"
"\n"
"<b>Build Optimizations:</b>\n"
"<ul>\n"
"<li> Set <b>restrict indexdb for spidering</b> on the \n"
"<li> Disable dup checking. Gigablast will not allow any duplicate pages\n"
"from the same domain into the index when this is enabled. This means that\n"
"Gigablast must do about one disk seek for every URL indexed to verify it is\n"
"not a duplicate. If you keep checksumdb all in memory this will not be a\n"
"problem.\n"
"<li> Disable <b>link voting</b>. Gigablast performs at least one disk seek\n"
"to determine who link to the URL being indexed. If it does have some linkers\n"
"then the Cached Copy of each linker (up to 200) is loaded and the corresponding\n"
"link text is extracted. Most pages do not have many linkers so the disk\n"
"load is not too bad. Furthermore, if you do enable link voting, you can\n"
"restrict it to the first file of indexdb, <b>restrict indexdb for \n"
"spidering</b>, to ensure that about one seek is used to determine the linkers.\n"
"<li> Enable <b>use IfModifiedSince</b>. This tells the spider not to do \n"
"anything if it finds that a page being reindexed is unchanged since the last\n"
"time it was indexed. Some web servers do not support the IfModifiedSince tag,\n"
"so Gigablast will compare the old page with the new one to see if anything\n"
"changed. This backup method is not quite as efficient as the first, \n"
"but it can still save ample disk resources.\n"
"<!--<li> Don't let Linux's bdflush flush the write buffer to disk whenever it \n"
"wants. Gigablast needs to control this so it won't perform a lot of reads\n"
"when a write is going on. Try performing a 'echo 1 > /proc/sys/vm/bdflush'\n"
"to make bdflush more bursty. More information about bdflush is available\n"
"in the Linux kernel source Documentation directory in the proc.txt file.-->\n"
"</ul>\n"
"\n"
"<br>\n"
"\n"
"<b>General Optimizations:</b>\n"
"<ul>\n"
"<li> Prevent Linux from unnecessary swapping. Linux will often swap out\n"
"Gigablast pages to satisfy Linux's disk cache. By using the swapoff command\n"
"to turn off swap you can increase performance, but if the computer runs out\n"
"of memory it will start killing processes withouth giving them a chance\n"
"to save their data.\n"
"<!--using Rik van Riel's\n"
"patch, rc6-rmap15j, applied to kernel 2.4.21, you can add the \n"
"/proc/sys/vm/pagecache control file. By doing a \n"
"'echo 1 1 > /proc/sys/vm/pagecache' you tell the kernel to only use 1%% of\n"
"the swap space, so swapping is effectively minimized.-->\n"
"</ul>\n"
"<br>\n"
"\n"
"\n"
"\n"
"\n"
"<a name=config></>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>gb.conf</font></a>\n"
"</td></tr></table>\n"
"<br><br>\n"
"<pre>\n"
//"## This is the IP and port that a user connects to in order to search this\n"
//"## Gigablast network. This should be the same for all gb processes\n"
//"&lt;mainExternalIp&gt; 68.35.105.199&lt;/&gt;\n"
//"&lt;mainExternalPort&gt; 8000&lt;/&gt;\n"
//"\n"
"## Mem available to this process. May be exceeded due to fragmentation.\n"
"&lt;maxMem&gt; 445000000&lt;/&gt;\n"
"\n"
"## Max incoming bandwith to use for spidering, for all hosts combined."
"&lt;maxIncomingKbps&gt; 3000.0&lt;/&gt;\n"
"\n"
"## The maximum number of pages to spider per second, for all hosts combined."
"&lt;maxPagesPerSecond&gt; 20.00&lt;/&gt;\n"
"\n"
"## Max threads for reading spider-related information on disk.\n"
"&lt;spiderMaxDiskThreads&gt; 1&lt;/&gt;\n"
"\n"
"## Max threads for reading big/med/small chunks of spider-related info on disk\n"
"&lt;spiderMaxBigDiskThreads&gt; 1&lt;/&gt;\n"
"&lt;spiderMaxMedDiskThreads&gt; 1&lt;/&gt;\n"
"&lt;spiderMaxSmaDiskThreads&gt; 5&lt;/&gt;\n"
"\n"
"## Max threads for reading query-related information on disk.\n"
"&lt;queryMaxDiskThreads&gt; 20&lt;/&gt;\n"
"\n"
"## Max threads for reading big/med/small chunks of query-related info on disk\n"
"&lt;queryMaxBigDiskThreads&gt; 1&lt;/&gt;\n"
"&lt;queryMaxMedDiskThreads&gt; 3&lt;/&gt;\n"
"&lt;queryMaxSmaDiskThreads&gt; 10&lt;/&gt;\n"
"\n"
"## What are the IP addresses and ports of the DNS servers? Accessed randomly.\n"
"&lt;dns&gt;&lt;ip&gt;68.35.172.5&lt;/&gt;&lt;port&gt;53&lt;/&gt;&lt;/&gt;\n"
"&lt;dns&gt;&lt;ip&gt;68.35.172.6&lt;/&gt;&lt;port&gt;53&lt;/&gt;&lt;/&gt;\n"
"\n"
"## How many bytes should we use for caching DNS replies?\n"
"&lt;dnsMaxCacheMem&gt; 13000&lt;/&gt;\n"
"\n"
"## Should we save/load the DNS reply cache when we exit/start? 1=YES 0=NO\n"
"&lt;dnsSaveCache&gt; 0&lt;/&gt;\n"
"\n"
"## Below the various Gigablast databases are configured.\n"
"## &lt;*dbMaxTreeMem&gt; - mem used for holding new recs\n"
"## &lt;*dbMaxPageCacheMem&gt; - disk page cache mem for this db\n"
"## &lt;*dbMaxCacheMem&gt; - cache mem for holding single recs\n"
"## &lt;*dbMinFilesToMerge&gt; - required # files to trigger merge\n"
"## &lt;*dbSaveCache&gt; - save the rec cache on exit?\n"
"## &lt;*dbMaxCacheAge&gt; - max age for recs in rec cache\n"
"## See that Stats page for a record counts and stats.\n"
"\n"
"## Sitedb holds site-based parsing info. A tagdb record assigns a url or site\n"
"## to a ruleset. Each tagdb record is about 100 bytes or so.\n"
"&lt;tagdbMaxTreeMem&gt; 1200000&lt;/&gt;\n"
"&lt;tagdbMaxPageCacheMem&gt; 200000&lt;/&gt;\n"
"&lt;tagdbMaxCacheMem&gt; 131072&lt;/&gt;\n"
"&lt;tagdbMinFilesToMerge&gt; 2&lt;/&gt;\n"
"\n"
"## Titledb holds the compressed documents that we've indexed.\n"
"&lt;titledbMaxTreeMem&gt; 1000000&lt;/&gt;\n"
"&lt;titledbMaxCacheMem 10485760&lt;/&gt;\n"
"&lt;titledbMinFilesToMerge&gt; 3&lt;/&gt;\n"
"&lt;titledbMaxCacheAge&gt; 86400&lt;/&gt;\n"
"&lt;titledbSaveCache&gt; 0&lt;/&gt;\n"
"\n"
"## Clusterdb caches small records for site clustering and deduping.\n"
"&lt;clusterdbMaxCacheMem&gt; 131072&lt;/&gt;\n"
"&lt;clusterdbSaveCache&gt; 0&lt;/&gt;\n"
"\n"
"## Checksumdb is used for deduping same-site urls at index time.\n"
"&lt;checksumdbMaxTreeMem&gt; 1048576&lt;/&gt;\n"
"&lt;checksumdbMaxCacheMem&gt; 2097152&lt;/&gt;\n"
"&lt;checksumdbMaxPageCacheMem&gt; 2097152&lt;/&gt;\n"
"&lt;checksumdbMinFilesToMerge&gt; 2&lt;/&gt;\n"
"\n"
"## Tfndb holds small records for each url in Titledb.\n"
"&lt;tfndbMaxTreeMem&gt; 5000000&lt;/&gt;\n"
"&lt;tfndbMaxPageCacheMem&gt; 155000000&lt;/&gt;\n"
"&lt;tfndbMinFilesToMerge&gt; 2&lt;/&gt;\n"
"\n"
"## Spiderdb holds urls to be spidered\n"
"&lt;spiderdbMaxTreeMem&gt; 1200000&lt;/&gt;\n"
"&lt;spiderdbMaxCacheMem&gt; 131072&lt;/&gt;\n"
"&lt;spiderdbMaxPageCacheMem&gt; 256000&lt;/&gt;\n"
"&lt;spiderdbMinFilesToMerge&gt; 2&lt;/&gt;\n"
"\n"
"## Robotdb caches robot.txt files.\n"
"&lt;robotdbMaxCacheMem&gt; 131072&lt;/&gt;\n"
"&lt;robotdbSaveCache&gt; 0&lt;/&gt;\n"
"\n"
"## Indexdb holds the terms extracted from spidered documents.\n"
"&lt;indexdbMaxTreeMem&gt; 8000000&lt;/&gt;\n"
"&lt;indexdbMaxCacheMem&gt; 500000&lt;/&gt;\n"
"&lt;indexdbMinFilesToMerge&gt; 4&lt;/&gt;\n"
"&lt;indexdbMaxIndexListAge&gt; 86400&lt;/&gt;\n"
"&lt;indexdbTruncationLimit&gt; 100000&lt;/&gt;\n"
"&lt;indexdbSaveCache&gt; 0&lt;/&gt;\n"
"&lt;onlyAddUnchangedTermIds&gt; 1&lt;/&gt;"
"\n"
"## The HTTP server info\n"
"## Maximum simultaneous connections. Excess will be closed.\n"
"&lt;httpMaxSockets&gt; 500&lt;/&gt;\n"
"&lt;httpMaxSendBufSize&gt; 32768&lt;/&gt;\n"
"\n"
"## Bytes to use for caching search result pages.\n"
"&lt;maxPageCacheMem&gt; 1000000&lt;/&gt;\n"
"## Maximum age in seconds.\n"
"&lt;maxPageCacheAge&gt; 14400&lt;/&gt;\n"
"&lt;resultsSaveCache&gt; 0&lt;/&gt;\n"
"\n"
"## Max linkers to a doc we sample to determine quality.\n"
"&lt;maxIncomingLinksToSample&gt; 100&lt;/&gt;\n"
"\n"
"## Percent more to weight phrases than single words.\n"
"&lt;queryPhraseWeight&gt; 100&lt;/&gt;\n"
"\n"
"## Maximum weight one query term can have relative to another in the query.\n"
"&lt;queryMaxMultiplier&gt; 10.0&lt;/&gt;\n"
"\n"
"## Sync info\n"
"&lt;syncIndexdb&gt; 1&lt;/&gt;\n"
"&lt;syncTitledb&gt; 1&lt;/&gt;\n"
"&lt;syncSpiderdb&gt; 1&lt;/&gt;\n"
"&lt;syncChecksumdb&gt; 1&lt;/&gt;\n"
"&lt;syncSitedb&gt; 1&lt;/&gt;\n"
"&lt;syncDoUnion&gt; 1&lt;/&gt;\n"
"&lt;syncDryRun&gt; 0&lt;/&gt;\n"
"&lt;syncBytesPerSecond&gt; 100000000&lt;/&gt;\n"
"\n"
"## Is spidering enabled for this host? 1=YES 0=NO\n"
"&lt;spideringEnabled&gt; 0&lt;/&gt;\n"
"\n"
"## Is injection enabled for this host? 1=YES 0=NO\n"
"&lt;injectionEnabled&gt; 1&lt;/&gt;\n"
"\n"
"## Can others add urls to a collection? 1=YES 0=NO\n"
"&lt;addUrlEnabled&gt; 0&lt;/&gt;\n"
"\n"
"## Serve ads from ah-ha? 1=YES 0=NO\n"
"&lt;adFeedEnabled&gt; 0&lt;/&gt;\n"
"\n"
"## Can non-admins connect to this webserver? 1=YES 0=NO\n"
"&lt;httpServerEnabled&gt; 1&lt;/&gt;\n"
"\n"
"## Send an email when a host is detected as dead? 1=YES 0=NO\n"
"&lt;sendEmailAlerts&gt; 0&lt;/&gt;\n"
"\n"
"## Allow software interrupts? 1=YES 0=NO\n"
"&lt;allowAsyncSignals&gt; 0&lt;/&gt;\n"
"\n"
"## Read only mode does not allow spidering. 1=YES 0=NO\n"
"&lt;readOnlyMode&gt; 0&lt;/&gt;\n"
"\n"
"## Use /etc/hosts file to resolve hostnames? 1=YES 0=NO\n"
"&lt;useEtcHosts&gt; 0&lt;/&gt;\n"
"\n"
"## Restrict merging to one host per token group? Hosts that use the same\n"
"## disk and mirror hosts are generally in the same token group so that only one\n"
"## host in the group can be doing a merge at a time. This prevents query\n"
"## response time from suffering too much. 1=YES 0=NO\n"
"&lt;useMergeToken&gt; 0&lt;/&gt;\n"
"\n"
"## If this is true we do not retrieve data from the network if we have it\n"
"## local. Useful if network is slow or drives are fast. 1=YES 0=NO\n"
"&lt;preferLocalReads&gt; 0&lt;/&gt;\n"
"\n"
"## If this is true all writes are synchronous. 1=YES 0=NO\n"
"&lt;flushWrites&gt; 1&lt;/&gt;\n"
"\n"
"## Spell checking requires considerably more memory, so only a few hosts should\n"
"## have this enabled if possible. 1=YES 0=NO\n"
"&lt;doSpellChecking&gt; 1&lt;/&gt;\n"
""
"## The User-Agent field used by the Gigablast spider.\n"
"&lt;spiderUserAgent&gt; Gigabot/1.0&lt;/&gt;\n"
""
"## Try to save unsaved in-memory data to disk every X minutes.\n"
"&lt;autoSaveFrequency&gt; 15&lt;/&gt;\n"
"</pre><a name=configlog></><pre>\n"
"## Log Controls\n"
"&lt;logHttpRequests&gt; 1&lt;/&gt;\n"
"&lt;logSpideredUrls&gt; 1&lt;/&gt;\n"
"&lt;logInfo&gt; 1&lt;/&gt;\n"
"&lt;logNetCongestion&gt; 0&lt;/&gt;\n"
"&lt;logLimits&gt; 0&lt;/&gt;\n"
"&lt;logDebugAddurl&gt; 0&lt;/&gt;\n"
"&lt;logDebugAdmin&gt; 0&lt;/&gt;\n"
"&lt;logDebugBuild&gt; 0&lt;/&gt;\n"
"&lt;logDebugDb&gt; 0&lt;/&gt;\n"
"&lt;logDebugDisk&gt; 0&lt;/&gt;\n"
"&lt;logDebugHttp&gt; 0&lt;/&gt;\n"
"&lt;logDebugLoop&gt; 0&lt;/&gt;\n"
"&lt;logDebugNet&gt; 0&lt;/&gt;\n"
"&lt;logDebugQuery&gt; 0&lt;/&gt;\n"
"&lt;logDebugSpeller&gt; 0&lt;/&gt;\n"
"&lt;logDebugTcp&gt; 0&lt;/&gt;\n"
"&lt;logDebugThread&gt; 0&lt;/&gt;\n"
"&lt;logDebugTopics&gt; 0&lt;/&gt;\n"
"&lt;logDebugUdp&gt; 0&lt;/&gt;\n"
"&lt;logTimingBuild&gt; 0&lt;/&gt;\n"
"&lt;logTimingDb&gt; 0&lt;/&gt;\n"
"&lt;logTimingNet&gt; 0&lt;/&gt;\n"
"&lt;logTimingQuery&gt; 0&lt;/&gt;\n"
"&lt;logTimingTopics&gt; 0&lt;/&gt;\n"
"&lt;logReminders&gt; 0&lt;/&gt;\n"
"\n"
"</pre>\n"
"\n"
"\n"
"<a name=hosts></>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>hosts.conf</font></a>\n"
"</td></tr></table>\n"
"<br><br>\n"
"Every gb process uses the same hosts.conf file. The hosts.conf file describes the hosts (gb processes) participating in the network.\n"
"Each line in this file is a host entry. The number of participating hosts must be a power of 2. Each host entry uses the following fields: <br><br>\n"
"<table cellpadding=3>\n"
"<tr><td><b>ID</b></td><td>Each host has a unique id. The ids must be contiguous.</td></tr>\n"
"<tr><td><b>IP</b></td><td>Each host has an IP. If you are running multiple hosts on the same computer they may all use the same IP.</td></tr>\n"
"<tr><td><b>LINKIP</b></td><td>This is the IP of this host as viewed externally. It may or may not be different from the internal IP. It is only used for generating absolute (non-relative) links for &lt;a href&gt; tags on dynamic HTML pages.\n"
"<tr><td><b>UDP1</b></td><td>This is the low priority udp port used by the host. Hosts on the same computer must have different ports. Port numbers must be above 2000 or so, because only root has permission to use those ports.</td></tr>\n"
"<tr><td><b>UDP2</b></td><td>This is the high priority udp port used by the host. Hosts on the same computer must have different ports. Port numbers must be above 2000 or so, because only root has permission to use those ports.</td></tr>\n"
"<tr><td><b>DNS</b></td><td>This is the client port we use locally when talking to the dns server.</td></tr>\n"
"<tr><td><b>HTTP</b></td><td>This is the HTTP port used by the host. To avoid conflicts, hosts on the same computer must have different ports. Port numbers must be above 2000 or so, because only root has permission to use those ports.</td></tr>\n"
"<tr><td><b>IDE</b></td><td>The IDE channel number that the host uses. Hosts on the same computer that share the same IDE bus must have this number be the same.</td></tr>\n"
"<tr><td><b>GRP</b></td><td>The redundancy group number to which the host belongs. Hosts that are mirror images (twins) of each other have the same redundancy group number.</td></tr>\n"
"<tr><td><b>DIR</b></td><td>The working directory where the host stores all files related to the gb process.</td></tr>\n"
"</table>\n"
"\n"
"<br>\n"
"<b>IMPORTANT:</b> The group IDS in the hosts.conf must be strictly "
"increasing, at least up until it hits a host in group #0 again."
"<br>\n"
"Here is a sample hosts.conf file for a network of 8 hosts running on 8 computers:<br><br>\n"
"\n"
"<pre>\n"
"#ID IP LINKIP UDP1 UDP2 DNS HTTP IDE GRP DIR\n"
"\n"
"0 64.62.142.231 64.62.142.231 9000 10000 6000 8000 0 0 /a\n"
"1 64.62.142.233 64.62.142.233 9000 10000 6000 8000 0 1 /a\n"
"2 64.62.142.235 64.62.142.235 9000 10000 6000 8000 0 2 /a\n"
"3 64.62.142.237 64.62.142.237 9000 10000 6000 8000 0 3 /a\n"
"4 64.62.142.239 64.62.142.239 9000 10000 6000 8000 0 0 /a\n"
"5 64.62.142.241 64.62.142.241 9000 10000 6000 8000 0 1 /a\n"
"6 64.62.142.244 64.62.142.244 9000 10000 6000 8000 0 2 /a\n"
"7 64.62.142.246 64.62.142.246 9000 10000 6000 8000 0 3 /a\n"
"</pre>\n"
"<br>\n"
"</pre>\n"
"\n"
""
""
/*
"<a name=ruleset></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Sample Ruleset File</font></a>\n"
"</td></tr></table>\n"
"<br><br>\n"
"A <b>ruleset</b> is a set of rules used for spidering and indexing the content of a URL. This <a href=\"#classifying\">section</a> talks about how to assign a ruleset to a URL. Each ruleset is a file in Gigablast's working directory with a file name like tagdb*.xml, where '*' is a number.\n"
"<br><br>\n"
"<b>IMPORTANT:</b> Do not change the <a href=\"#indexingsection\">indexing section</a> or the &lt;linksUnbanned&gt;, &lt;linksClean&gt; or &lt;linksDirty&gt; tags of a ruleset file if some documents in the index were indexed with that ruleset file. To do so might create some unrepairable data corruption.\n"
"<br><br>\n"
"The following is an example ruleset for a particular URL (\"the URL\"):\n"
"\n"
"<pre>\n"
"\n"
"# This is the unique name of the ruleset which is used for \n"
"# display in drop-down menus in administrative, web-based GUIs.\n"
"<b>&lt;name&gt;default&lt;/&gt;</b>\n"
"<a name=\"qualitysection\"></a>\n"
"\n"
"# This is the accompanying description displayed on the Sitedb tool and\n"
"# URL Filters pages.\n"
"<b>&lt;description&gt;This is the default ruleset used for most urls.&lt;/&gt;</b>\n"
"\n"
"# If a ruleset is no longer actively used, it is not deleted, but retired.\n"
"# Retired rulesets are not displayed to spam assassins on the Sitedb tool \n"
"# and URL Filters pages.\n"
"<b>&lt;retired&gt;no&lt;/&gt;</b>\n"
"\n"
"##############################################################################\n"
"# \n"
"# The Quality Section. This section of the ruleset is used to determine the \n"
"# QUALITY of the URL. The quality ranges from 0%% to over 100%% and is used to \n"
"# influence many other things in this file. A quality of 30%% is considered to \n"
"# be the quality of the average web page.\n"
"#\n"
"##############################################################################\n"
"\n"
"# The quality of the URL will not be allowed to exceed this value.\n"
"<b>&lt;maxQuality&gt;100&lt;/&gt;</b> (default 100%%)\n"
"\n"
"# This is the unadjusted quality of the URL. The maps below may modify it to\n"
"# get the final quality of the URL.\n"
"<b>&lt;baseQuality&gt;30&lt;/&gt;</b> (default 30%%)\n"
"<a name=\"map\"></a>\n"
"# Now for some maps. Each map is a graph that maps one thing to another.\n"
"# The first thing listed is the X component. All X components are listed first\n"
"# followed by their corresponding Y components. Taken together they create a\n"
"# set of points on the Cartesian graph. In this way Gigablast can map an\n"
"# arbitrary value in the domain (X axis) to its corresponding value in the\n"
"# image (Y axis). The X components must be in ascending order.\n"
"#\n"
"# The tag name of each map component, usually something like 'numLinks13',\n"
"# always contains a number, in the case of this example it is 13. These numbers\n"
"# are just used to ensure that the tag name is unique, nothing more.\n"
"#\n"
"# Gigablast linearly interpolates between the supplied points in the graph in \n"
"# order to map X values that are not explicitly given in the graph. The \n"
"# interpolation function extends horizontally from the first/last points with \n"
"# the same image value of the first/last point.\n"
"#\n"
"# A map can have up to 32 defined points, but typically just 5 are used.\n"
"\n"
"# In this map the number of incoming links is mapped to a quality BOOST for the\n"
"# URL. Only one incoming link is counted per top 2 bytes of the ip address\n"
"# (most significant 2 bytes of the IP) if \"restrict link voting\" is\n"
"# turned on in the Spider Controls. This helps prevent spam. This boost\n"
"# is added to the baseQuality, not multiplied.\n"
"<b>&lt;numLinks11&gt; 0 &lt;/&gt;</b>\n"
"<b>&lt;numLinks12&gt; 5 &lt;/&gt;</b>\n"
"<b>&lt;numLinks13&gt; 10 &lt;/&gt;</b>\n"
"<b>&lt;numLinks14&gt; 20 &lt;/&gt;</b>\n"
"<b>&lt;numLinks15&gt; 50 &lt;/&gt;</b>\n"
"<b>&lt;qualityBoost11&gt; 0 &lt;/&gt;</b>\n"
"<b>&lt;qualityBoost12&gt; 5 &lt;/&gt;</b>\n"
"<b>&lt;qualityBoost13&gt; 10 &lt;/&gt;</b>\n"
"<b>&lt;qualityBoost14&gt; 15 &lt;/&gt;</b>\n"
"<b>&lt;qualityBoost15&gt; 20 &lt;/&gt;</b>\n"
"\n"
"# This map is like the above map, but the SUM of the baseQuality of all \n"
"# linkers is mapped to a baseQuality boost for the URL. The boost is added to \n"
"# the baseQuality, not multiplied.\n"
"<b>&lt;linkQualitySum21&gt; 0 &lt;/&gt;</b>\n"
"<b>&lt;linkQualitySum22&gt; 50 &lt;/&gt;</b>\n"
"<b>&lt;linkQualitySum23&gt; 100 &lt;/&gt;</b>\n"
"<b>&lt;linkQualitySum24&gt; 150 &lt;/&gt;</b>\n"
"<b>&lt;linkQualitySum25&gt; 200 &lt;/&gt;</b>\n"
"<b>&lt;qualityBoost21&gt; 0 &lt;/&gt;</b>\n"
"<b>&lt;qualityBoost22&gt; 5 &lt;/&gt;</b>\n"
"<b>&lt;qualityBoost23&gt; 10 &lt;/&gt;</b>\n"
"<b>&lt;qualityBoost24&gt; 15 &lt;/&gt;</b>\n"
"<b>&lt;qualityBoost25&gt; 20 &lt;/&gt;</b>\n"
"\n"
"# This map is like the above map, but the quality of the root page of the URL\n"
"# is mapped to a baseQuality boost for the URL. The boost is added to the \n"
"# baseQuality, not multiplied. If the URL is a root URL then the rootQuality\n"
"# for purposes of just this map is assumed to be 30%% to prevent explosive\n"
"# feedback.\n"
"<b>&lt;rootQuality31&gt; 0 &lt;/&gt;</b>\n"
"<b>&lt;rootQuality32&gt; 50 &lt;/&gt;</b>\n"
"<b>&lt;rootQuality33&gt; 100 &lt;/&gt;</b>\n"
"<b>&lt;rootQuality34&gt; 200 &lt;/&gt;</b>\n"
"<b>&lt;rootQuality35&gt; 500 &lt;/&gt;</b>\n"
"<b>&lt;qualityBoost31&gt; 0 &lt;/&gt;</b>\n"
"<b>&lt;qualityBoost32&gt; 5 &lt;/&gt;</b>\n"
"<b>&lt;qualityBoost33&gt; 10 &lt;/&gt;</b>\n"
"<b>&lt;qualityBoost34&gt; 15 &lt;/&gt;</b>\n"
"<b>&lt;qualityBoost35&gt; 20 &lt;/&gt;</b>\n"
"\n"
"\n"
"##############################################################################\n"
"#\n"
"# The Quota Section. How many documents should we index from the site of the \n"
"# URL? Quotas can be turned on/off for old/new URLs via the \"Spider Controls\" \n"
"# page.\n"
"#\n"
"##############################################################################\n"
"\n"
"# How many docs from the site of the URL should we allow into the index?\n"
"# A site is typically just the hostname of the URL, but, if a record for\n"
"# the URL exists in tagdb, then the site of that record will be the site.\n"
"# Use -1 for no max. \n"
"<b>&lt;maxDocs&gt;20000&lt;/&gt;</b> (default -1)\n"
"\n"
"# This map maps the quality of the root page of the URL to a quota boost.\n"
"# The boost can be negative. A boost of -100%% makes the quota 0.\n"
"# The base quota is given by the &lt;maxDocs&gt; field above.\n"
"<b>&lt;rootQuality71&gt; 0 &lt;/&gt;</b>\n"
"<b>&lt;rootQuality72&gt; 30 &lt;/&gt;</b>\n"
"<b>&lt;rootQuality73&gt; 50 &lt;/&gt;</b>\n"
"<b>&lt;rootQuality74&gt; 60 &lt;/&gt;</b>\n"
"<b>&lt;rootQuality75&gt; 70 &lt;/&gt;</b>\n"
"<b>&lt;quotaBoost71&gt; -100 &lt;/&gt;</b>\n"
"<b>&lt;quotaBoost72&gt; 0 &lt;/&gt;</b>\n"
"<b>&lt;quotaBoost73&gt; 100 &lt;/&gt;</b>\n"
"<b>&lt;quotaBoost74&gt; 200 &lt;/&gt;</b>\n"
"<b>&lt;quotaBoost75&gt; 300 &lt;/&gt;</b>\n"
"\n"
"# Like the above map, but the quality of the URL is mapped to a quota boost.\n"
"# The quota boost is multiplied by the &lt;maxDocs&gt; number and then added\n"
"# to it.\n"
"<b>&lt;quality81&gt; 0 &lt;/&gt;</b>\n"
"<b>&lt;quality82&gt; 30 &lt;/&gt;</b>\n"
"<b>&lt;quality83&gt; 50 &lt;/&gt;</b>\n"
"<b>&lt;quality84&gt; 60 &lt;/&gt;</b>\n"
"<b>&lt;quality85&gt; 70 &lt;/&gt;</b>\n"
"<b>&lt;quotaBoost81&gt; -100 &lt;/&gt;</b>\n"
"<b>&lt;quotaBoost82&gt; 0 &lt;/&gt;</b>\n"
"<b>&lt;quotaBoost83&gt; 100 &lt;/&gt;</b>\n"
"<b>&lt;quotaBoost84&gt; 200 &lt;/&gt;</b>\n"
"<b>&lt;quotaBoost85&gt; 300 &lt;/&gt;</b>\n"
"\n"
"##############################################################################\n"
"#\n"
"# The Spider Section. The following parameters control how the URL is spidered.\n"
"# Spidering can be turned on/off as a whole or for various spider priority \n"
"# queues via the Spider Controls page. Many other parameters exist there as \n"
"# well.\n"
"#\n"
"##############################################################################\n"
"\n"
"# How long to wait to respider for the first time.\n"
"# This is in DAYS. This tag overrides Spider Controls if present.\n"
"&lt;firstRespiderWait&gt;3600&lt;/&gt; (default is to omit this tag)\n"
"\n"
//"# How long to wait to respider if there was an error.\n"
//"# This is in DAYS. This tag overrides Spider Controls if present.\n"
//"&lt;errorRespiderWait&gt;3600&lt;/&gt; (default is to omit this tag)\n"
//"\n"
"# What is the minimum amount of time we should wait before re-spidering a URL?\n"
"# Re-spider frequency is usually intelligently determined using a bisection\n"
"# method based on the update frequency of the URL.\n"
"# This is in seconds. default = 1 day = 24*60*60 = 86400.\n"
"<b>&lt;minRespiderWait&gt;86400&lt;/&gt;</b> (default 86400)\n"
"\n"
"# What is the maximum amount of time we should wait before re-spidering a URL?\n"
"# Re-spider frequency is usually intelligently determined using a bisection\n"
"# method based on the update frequency of the URL.\n"
"# This is in seconds. default = 90 days = 90*24*60*60 = 7776000.\n"
"<b>&lt;maxRespiderWait&gt;7776000&lt;/&gt;</b> (default 7776000)\n"
"\n"
"# What spider frequency in days should this URL be assigned?\n"
"# If this is -1 then the re-spider frequency is intelligently determined using \n"
"# a bisection method based on the update frequency of the URL.\n"
"# This is not yet supported.\n"
"# &lt;spiderFrequency&gt;-1&lt;/&gt;\n"
"\n"
"# What <a href=\"#spiderqueue\">spider priority</a> should this URL be assigned?\n"
"# Use -1, the default, to leave unspecified. If not assigned by a matching\n"
"# regular expression, it may be determined by the spider priority of the\n"
"# page from which it was harvested as a link, minus one.\n"
"# This is not yet supported.\n"
"# &lt;spiderPriority&gt;-1&lt;/&gt; (default -1)\n"
"\n"
"# What is the min/max spider priority the URL should be assigned.\n"
"# Priorities range from 0 up to 7. (see &lt;spiderPriority&gt; tag above)\n"
"# This is not yet supported.\n"
"#&lt;spiderMinPriority&gt;0&lt;/&gt; (default 0)\n"
"#&lt;spiderMaxPriority&gt;5&lt;/&gt; (default 7)\n"
"\n"
"# What <a href=\"#spiderqueue\">spider priority</a> should links harvested on the URL's page be assigned?\n"
"# Priorities range from 0 up to 7.\n"
"# -1, the default, means to use the spider priority of the URL minus one.\n"
"# This results in a breadth first spidering algorithm until the URL is\n"
"# from the priority 0 spider queue, in which case, the harvested links will\n"
"# also be assigned to the priority 0 queue.\n"
"<b>&lt;spiderLinkPriority&gt;-1&lt;/&gt;</b> (default -1)\n"
"\n"
"# Should we spider links for the URL? If \"spider links\" is toggled off on the \n"
"# Spider Controls page then this will *not* override.\n"
"<b>&lt;spiderLinks&gt;yes&lt;/&gt;</b> (default yes)\n"
"\n"
"# Should we only harvest links from the same host of the URL? \n"
"# If url is just a domain, then the www hostname is allowed as well.\n"
"# This overrides the same control on Spider Controls page, so leave it \n"
"# out if you do not want to override that control. This is primarily used "
"# good directory sites that have the power to unban soft banned sites, and "
"# such unbanned sites are then only permitted to harvest internal links.\n"
"#<b>&lt;spiderLinksFromSameHostOnly&gt;no&lt;/&gt;</b> (default is to omit this tag)\n"
"\n"
"##############################################################################\n"
"#\n"
"# The Classification Section. How is the URL classified?\n"
"#\n"
"##############################################################################\n"
"\n"
"\n"
"# If the URL's quality is at or below this, then it will be checked for adult \n"
"# content.\n"
"<b>&lt;maxQualityForAdultDetect&gt;0&lt;/&gt;</b> (default 0%%)\n"
"\n"
"# Do links from the URL point to clean pages?\n"
"<b>&lt;linksClean&gt;no&lt;/&gt;</b> (default no)\n"
"\n"
"# Do links from the URL point to clean pages?\n"
"<b>&lt;linksDirty&gt;no&lt;/&gt;</b> (default no)\n"
"\n"
"# Is the URL adult-oriented?\n"
"<b>&lt;isAdult&gt;no&lt;/&gt;</b> (default no)\n"
"\n"
"# Is the URL banned from the index? The default is no.\n"
"# If it is banned it will not be indexed. If it is already indexed then it\n"
"# will be removed from the index the next time it is respidered/reinjected.\n"
"<b>&lt;isBanned&gt;no&lt;/&gt;</b> (default no)\n"
"\n"
"# Can the URL be unbanned? If the URL's &lt;isBanned&gt; tag is set to yes,\n"
"# and this tag is set to yes, then the URL is said to be \"soft banned\".\n"
"# If another URL links to the soft banned URL and that\n"
"# other URL is indexed with &lt;linksUnbanned&gt;yes&lt; in its ruleset then\n"
"# it will UNban the URL. This is useful for doing liberal banning but relying \n"
"# on a directory site like dmoz.org to unban URLs that should not have been \n"
"# banned.\n"
"<b>&lt;canBeUnbanned&gt;no&lt;/&gt;</b> (default yes)\n"
"\n"
"# See above description for &lt;canBeUnbanned&gt; tag for how this works.\n"
"<b>&lt;linksUnbanned&gt;no&lt;/&gt;</b> (default no)\n"
"\n"
"# Should we ban the DOMAINS of the the links in the URL's content. The ban \n"
"# from the URL expires if the URL is removed from the index.\n"
"<b>&lt;linksBanned&gt;no&lt;/&gt;</b> (default no)\n"
"\n"
"# What ruleset should those URLs that the URL links to use? \n"
"# Specify it by name. This is a useful way of assigning a URL to a ruleset.\n"
"# This is not yet supported.\n"
"# &lt;rulesetOfLinks&gt;special&lt;/&gt;\n"
"\n"
"##############################################################################\n"
"#\n"
"# The Filter Section tells Gigablast what to allow into the index.\n"
"#\n"
"##############################################################################\n"
"\n"
"# If the URL's quality is LESS THAN this it will not be indexed. If the URL is\n"
"# being reindexed then it will be removed from the index.\n"
"<b>&lt;minQualityToIndex&gt;0&lt;/&gt;</b> (default 0%%)\n"
"\n"
"# Allow URLs ending in .cgi or URLs containing ?'s into the index?\n"
"<b>&lt;allowCgiUrls&gt;yes&lt;/&gt;</b> (default yes)\n"
"\n"
"# Allow URLs with no canonical domain name into the index?\n"
"<b>&lt;allowIpUrls&gt;yes&lt;/&gt;</b> (default yes)\n"
"\n"
"# Delete 404'ed documents from the index?\n"
"# If you are making a historical index, you may want to set this to no.\n"
"<b>&lt;delete404s&gt;yes&lt;/&gt;</b> (default yes)\n"
"\n"
"# Should the URL be indexed if it is adult-oriented? \n"
"<b>&lt;allowAdultContent&gt;yes&lt;/&gt;</b> (default yes)\n"
"\n"
"# Index the URL even if it is a duplicate of another page from the same site?\n"
"# This overrides the \"deduping enabled\" switch in the Spider Controls,\n"
"# so omit this tag to rely solely on that Spider Controls switch.\n"
"<b>&lt;indexDupContent&gt;no&lt;/&gt;</b> (default is to omit this tag)\n"
"\n"
"# Should the checksum hash be computed just from the indexed words? If this\n"
"# is true then pages from the same site will be detected as dups more\n"
"# often. Useful for newspaper articles where we only index the content of\n"
"# the article. Also, it is independent of the order of the words. This\n"
"# checksum is also used to see if the content of the page has changed in\n"
"# order to set the next respider date for intelligent respidering.\n"
"<b>&lt;useLooseChecksums&gt;no&lt;/&gt;</b> (default is no)\n"
"\n"
"# Index document for sort or constrain by date. Almost doubles disk space.\n"
"<b>&lt;indexDate&gt;yes&lt;/&gt;</b> (default yes)\n"
"\n"
"# # If the url does not get indexed should we still keep it scheduled to be\n"
"# be spidered again later in spiderdb? Handy for seed pages, like good \n"
"# directory pages that link to the stuff you want to index.\n"
"<b>&lt;keepUnindexedUrls&gt;no&lt;/&gt;</b> (default no)\n"
"\n"
"# Index documents without dollar signs. Special case for shopping index.\n"
"<b>&lt;needDollarSign&gt;no&lt;/&gt;</b> (default no)\n"
//"\n"
//"# Does the url need to contain back-to-back digits in its path in order to\n"
//"# be indexed?\n"
//"<b>&lt;needNumbersInUrl&gt;no&lt;/&gt;</b> (default no)\n"
"\n"
"# If date on page is older than this many days, do not index.\n"
"# Omit this tag to default to the value in Spider Controls page.\n"
"# 0.0 says to index all documents regardless of their extracted date.\n"
"# Good directory sites usually have this set to 0.0 for the news collection.\n"
"<b>&lt;daysBeforeNowToIndex&gt;0.0&lt;/&gt;</b> (default is to omit this tag)\n"
"<a name=\"linktextsection\"></a>\n"
"##############################################################################\n"
"#\n"
"# The Link Text Section. When a URL is indexed, Gigablast will determine what\n"
"# other URLs link to it and harvest the relevant link text from each of those \n"
"# URLs. That link text is then indexed as if it occurred on the URL's page \n"
"# itself, but it is not subject to <a href=\"#spam\">spam detection</a>. See the \n"
"# section on <a href=\"#linktext\">link text</a> for more about how link text is \n"
"# indexed and what controls are available in the administrative interface.\n"
"#\n"
"##############################################################################\n"
"\n"
"# Should we index the URL's incoming link text as if it were on the page?\n"
"<b>&lt;indexIncomingLinkText&gt;yes&lt;/&gt;</b> (default yes)\n"
"\n"
"# This maps the URL's quality to a weight on the score of its OUTGOING link \n"
"# text. The score of the terms in the link text is multiplied by this weight. \n"
"# If the URL links to nothing then this is useless. Currently we limit \n"
"# link text to up to 256 chars in LinkInfo.cpp.\n"
"<b>&lt;quality41&gt; 0 &lt;/&gt;</b>\n"
"<b>&lt;quality42&gt; 30 &lt;/&gt;</b>\n"
"<b>&lt;quality43&gt; 50 &lt;/&gt;</b>\n"
"<b>&lt;quality44&gt; 70 &lt;/&gt;</b>\n"
"<b>&lt;quality45&gt; 85 &lt;/&gt;</b>\n"
"<b>&lt;linkTextScoreWeight41&gt; 25 &lt;/&gt;</b>\n"
"<b>&lt;linkTextScoreWeight42&gt; 200 &lt;/&gt;</b>\n"
"<b>&lt;linkTextScoreWeight43&gt; 250 &lt;/&gt;</b>\n"
"<b>&lt;linkTextScoreWeight44&gt; 275 &lt;/&gt;</b>\n"
"<b>&lt;linkTextScoreWeight45&gt; 300 &lt;/&gt;</b>\n"
"\n"
"# This maps the number of words in the link text of a link to a boost on the \n"
"# score weight of that link text. The score of the terms in the link text is \n"
"# multiplied by this weight. Currently we limit link text to 256 chars in \n"
"# LinkInfo.cpp.\n"
"<b>&lt;linkTextNumWords61&gt; 3 &lt;/&gt;</b>\n"
"<b>&lt;linkTextNumWords62&gt; 6 &lt;/&gt;</b> \n"
"<b>&lt;linkTextNumWords63&gt; 9 &lt;/&gt;</b> \n"
"<b>&lt;linkTextNumWords64&gt; 12 &lt;/&gt;</b> \n"
"<b>&lt;linkTextScoreWeight61&gt; 150 &lt;/&gt;</b>\n"
"<b>&lt;linkTextScoreWeight62&gt; 80 &lt;/&gt;</b> \n"
"<b>&lt;linkTextScoreWeight63&gt; 50 &lt;/&gt;</b> \n"
"<b>&lt;linkTextScoreWeight64&gt; 25 &lt;/&gt;</b> \n"
"\n"
"# This maps the URL's quality to a maximum score for the terms in the link \n"
"# text. 100%% is the maximum 'maximum score'.\n"
"<b>&lt;quality51&gt; 0 &lt;/&gt;</b>\n"
"<b>&lt;quality52&gt; 15 &lt;/&gt;</b>\n"
"<b>&lt;quality53&gt; 25 &lt;/&gt;</b>\n"
"<b>&lt;quality54&gt; 45 &lt;/&gt;</b>\n"
"<b>&lt;quality55&gt; 75 &lt;/&gt;</b>\n"
"<b>&lt;linkTextMaxScore51&gt; 100 &lt;/&gt;</b>\n"
"<b>&lt;linkTextMaxScore52&gt; 100 &lt;/&gt;</b>\n"
"<b>&lt;linkTextMaxScore53&gt; 100 &lt;/&gt;</b>\n"
"<b>&lt;linkTextMaxScore54&gt; 100 &lt;/&gt;</b>\n"
"<b>&lt;linkTextMaxScore55&gt; 100 &lt;/&gt;</b>\n"
"<a name=\"indexingsection\"></a>\n"
"##############################################################################\n"
"#\n"
"# The Indexing Section. What parts of the document should be indexed and how?\n"
"# <b>IMPORTANT:</b> Do not change this section if some documents in the index \n"
"# were indexed with this ruleset file. To do so might create some unrepairable\n"
"# data corruption.\n"
"#\n"
"##############################################################################\n"
"\n"
"# Should Gigablast index site:, subsite:, url:, suburl:, ip: or link: terms \n"
"# of the URL respectively?\n"
"<b>&lt;indexSite&gt; yes&lt;/&gt;</b> (default yes) site: terms \n"
"<b>&lt;indexUrl&gt; yes&lt;/&gt;</b> (default yes) url: terms\n"
"<b>&lt;indexSubUrl&gt; yes&lt;/&gt;</b> (default yes) suburl: terms\n"
"<b>&lt;indexIp&gt; yes&lt;/&gt;</b> (default yes) ip: terms\n"
"<b>&lt;indexLinks&gt; yes&lt;/&gt;</b> (default yes) link:/href: terms\n"
"\n"
"# This is used only for news collections for doing automatic "
"categorization.\n"
"<b>&lt;indexNewsTopic&gt; yes&lt;/&gt;</b> (default no) newstopic: terms\n"
"\n"
"# This maps the URL's quality to a spam threshold, X. If more than X%% of\n"
"# the words in the document are spammed (repeated in a pattern) to some\n"
"# degree then all of the words will be indexed with a minimum score.\n"
"<b>&lt;quality61&gt; 30 &lt;/&gt;</b>\n"
"<b>&lt;quality62&gt; 40 &lt;/&gt;</b>\n"
"<b>&lt;quality63&gt; 50 &lt;/&gt;</b>\n"
"<b>&lt;quality64&gt; 70 &lt;/&gt;</b>\n"
"<b>&lt;quality65&gt; 90 &lt;/&gt;</b>\n"
"<b>&lt;maxPercentSpammed1&gt; 6 &lt;/&gt;</b>\n"
"<b>&lt;maxPercentSpammed2&gt; 8 &lt;/&gt;</b>\n"
"<b>&lt;maxPercentSpammed3&gt; 10 &lt;/&gt;</b>\n"
"<b>&lt;maxPercentSpammed4&gt; 20 &lt;/&gt;</b>\n"
"<b>&lt;maxPercentSpammed5&gt; 30 &lt;/&gt;</b>\n"
"\n"
"<a name=\"indexblock\"></># Gigablast can index the various parts of a document differently. Each\n"
"# part of the document can have its own set of indexing and scoring rules.\n"
"# Each such part can be represented with an &lt;index&gt; tag. The index tags\n"
"# are processed in the order you give them in this ruleset file. Tags that\n"
"# are specialized for the &lt;index&gt; tag which contains them are highlighted\n"
"# in <font color=red>red</font>.\n"
"\n"
"# The following &lt;index&gt; tag block tells Gigablast how to index the words\n"
"# in the HTML &lt;title&gt; tag. The words in the title tag are indexed before \n"
"# the words in the body because we don't want words in the body to count \n"
"# towards the &lt;maxScore&gt; limit placed on the words in the title.\n"
"<b>&lt;index&gt;</b>\n"
"\n"
" # The part of the document to which this &lt;index&gt; tag applies.\n"
" # This particular one says to index the terms in the &lt;title&gt;\n"
" # tag. This could just as easily be an &lt;h1&gt; tag or even a non-HTML\n"
" # tag like &lt;foobar&gt;. Omit this tag or leave the value of the tag blank\n"
" # to index the whole body of the document.<a name=\"indexname\"></>\n"
" <font color=red><b>&lt;name&gt; title &lt;/&gt;</b></font>\n"
"\n"
" # Spam detection will be performed on these terms if the URL's quality is\n"
" # this or lower. It is mostly disabled for these title terms because they \n"
" # are restricted in score by other means below. Spam detection may lower the\n"
" # scores of repeated terms.\n"
" <b>&lt;maxQualityForSpamDetect&gt; 0 &lt;/&gt;</b>\n"
"\n"
" # If the URL's quality is below this, then do not index the terms in the\n"
" # title tag.\n"
" <b>&lt;minQualityToIndex&gt; 0 &lt;/&gt;</b>\n"
"\n"
" # If this is 'yes' then convert HTML entities in the title, like &AMP;gt;,\n"
" # into their represented characters before indexing.\n"
" <b>&lt;filterHtmlEntities&gt; yes &lt;/&gt;</b>\n"
"\n"
" # Should each term in the title only be indexed if it has not already been \n"
" # indexed? You can affect this by changing the order of the &lt;index&gt; tags.\n"
" <b>&lt;indexIfUniqueOnly&gt; no &lt;/&gt;</b>\n"
"\n"
" # Should single words in the title be indexed?\n"
" <b>&lt;indexSingletons&gt; yes &lt;/&gt;</b>\n"
"\n"
" # Should phrases in the title be indexed?\n"
" <b>&lt;indexPhrases&gt; yes &lt;/&gt;</b>\n"
"\n"
" # Should the whole title be indexed as one \"word\"?\n"
" <b>&lt;indexAsWhole&gt; no &lt;/&gt;</b>\n"
"\n"
" # Should stop words be used when indexing phrases in the title?\n"
" <b>&lt;useStopWords&gt; yes &lt;/&gt;</b>\n"
"\n"
" # Should we also index the stem of each word indexed? If \n"
" # &lt;indexSingletons&gt; is false this is ignored.\n"
" <b>&lt;useStems&gt; no &lt;/&gt;</b>\n"
"\n"
" # Map the URL's quality to a maximum length (in characters) of the title.\n"
" # Words whose characters occur passed the maximum length will not be \n"
" # indexed. Read more about <a href=\"#qualitysection\">quality</a> or <a href=\"#map\">maps</a>.\n"
" # This keeps the indexed portion of the title down to 200 characters for \n"
" # all qualities.\n"
" <b>&lt;quality11&gt; 15 &lt;/&gt;</b>\n"
" <b>&lt;maxLen11&gt; 200 &lt;/&gt;</b>\n"
"\n"
" # Map the URL's quality to a maximum score for indexing the terms in the\n"
" # title. 100%% is the maximum 'maximum score'. You cannot exceed 100%% ever.\n"
" <b>&lt;quality21&gt; 15 &lt;/&gt;</b>\n"
" <b>&lt;quality22&gt; 30 &lt;/&gt;</b>\n"
" <b>&lt;quality23&gt; 45 &lt;/&gt;</b>\n"
" <b>&lt;quality24&gt; 60 &lt;/&gt;</b>\n"
" <b>&lt;quality25&gt; 80 &lt;/&gt;</b>\n"
" <b>&lt;maxScore21&gt; 30 &lt;/&gt;</b>\n"
" <b>&lt;maxScore22&gt; 45 &lt;/&gt;</b>\n"
" <b>&lt;maxScore23&gt; 60 &lt;/&gt;</b>\n"
" <b>&lt;maxScore24&gt; 80 &lt;/&gt;</b>\n"
" <b>&lt;maxScore25&gt; 100 &lt;/&gt;</b>\n"
"\n"
" # Map the URL's quality to a percentage score boost for the terms in the \n"
" # title. This boost is multiplied by the score of each term indexed.\n"
" <b>&lt;quality31&gt; 15 &lt;/&gt;</b>\n"
" <b>&lt;quality32&gt; 30 &lt;/&gt;</b>\n"
" <b>&lt;quality33&gt; 45 &lt;/&gt;</b>\n"
" <b>&lt;quality34&gt; 60 &lt;/&gt;</b>\n"
" <b>&lt;quality35&gt; 80 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight31&gt; 60 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight32&gt; 100 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight33&gt; 150 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight34&gt; 200 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight35&gt; 250 &lt;/&gt;</b>\n"
"\n"
" # Map the URL's title length (in characters) to a percentage score boost for\n"
" # the terms in the title. This boost is multiplied by the score of each \n"
" # term indexed.\n"
" <b>&lt;len41&gt; 10 &lt;/&gt;</b>\n"
" <b>&lt;len42&gt; 50 &lt;/&gt;</b> \n"
" <b>&lt;len43&gt; 100 &lt;/&gt;</b> \n"
" <b>&lt;len44&gt; 200 &lt;/&gt;</b> \n"
" <b>&lt;len45&gt; 500 &lt;/&gt;</b> \n"
" <b>&lt;scoreWeight41&gt; 200 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight42&gt; 150 &lt;/&gt;</b> \n"
" <b>&lt;scoreWeight43&gt; 100 &lt;/&gt;</b> \n"
" <b>&lt;scoreWeight44&gt; 75 &lt;/&gt;</b> \n"
" <b>&lt;scoreWeight45&gt; 50 &lt;/&gt;</b> \n"
"\n"
" # Map the URL's title length (in characters) to a maximum score for the \n"
" # terms in the title. This maximum is expressed as a percentage of the\n"
" # maximum score physically possible.\n"
" <b>&lt;len51&gt; 100 &lt;/&gt;</b>\n"
" <b>&lt;maxScore51&gt; 30 &lt;/&gt;</b>\n"
"\n"
"<b>&lt;/index&gt;</b>\n"
"\n"
"\n"
"# The following &lt;index&gt; block tells Gigablast how to index the body.\n"
"# This will index words in the title tag, too, because that is considered \n"
"# part of the body. The body is essentially everything not in a meta tag, \n"
"# comment or javascript tag.\n"
"<b>&lt;index&gt;</b>\n"
"\n"
" # Should gigablast break the document into sections and score the\n"
" # words in sections with mostly link text lower than words in sections\n"
" # without much link text? This helps to reduce the effects of menu spam.\n"
" # Used for news articles.\n"
" # This only applies to the body of the document.\n"
" <b>&lt;scoreBySection&gt; no &lt;/&gt;</b> (default is yes)\n"
"\n"
" # Should gigablast attempt to isolate just the single most-relevant\n"
" # content section from the document and not index anything else?\n"
" # Used for news articles.\n"
" # This only applies to the body of the document.\n"
" <b>&lt;indexContentSectionOnly&gt; no &lt;/&gt;</b> (default is no)\n"
"\n"
" # The minimum score an entire section of the document needs to have its\n"
" # words indexed. Each word in a section counts as 128 points, but a\n"
" # word in a hyperlink counts as -256 points.\n"
" # Used for news articles.\n"
" # This only applies to the body of the document.\n"
" <b>&lt;minSectionScore&gt; -1000000000 &lt;/&gt;</b> (default is "
" -1000000000)\n"
"\n"
" # Count words in links as 21 points, words not in links as 128.\n"
" # the average score of each word is its score plus the scores of\n"
" # its 8 left and its 7 right neighbors divided by 16. If that\n"
" # average score is below this value, the word is not indexed and its\n"
" # average score is set to 0. Only valid if scoreBySection is true.\n"
" <b>&lt;minAvgWordScore&gt; 0 &lt;/&gt;</b> (default is 0)\n"
"\n"
" # If the number of indexable words that have a positive average score\n"
" # is below this value, then no words will be indexed. Used\n"
" # to just index beefy news articles. -1 means to ignore this constraint.\n"
" <b>&lt;minIndexableWords&gt; -1 &lt;/&gt;</b> (default is -1)\n"
"\n"
" # Weight the first X words higher.\n"
" # Used for news articles.\n"
" # This only applies to the body of the document.\n"
" <b>&lt;numTopWords&gt; 0 &lt;/&gt;</b> (default is 0)\n"
"\n"
" # Weight the first X words by this much, a rational number.\n"
" # Used for news articles.\n"
" # This only applies to the body of the document.\n"
" <b>&lt;topWordsWeight&gt; 1.0 &lt;/&gt;</b> (default is 1.0)\n"
"\n"
" # Weight the first sentence by this much, a rational number.\n"
" # Only applies to documents that support western punctuation.\n"
" # Used for news articles.\n"
" # This only applies to the body of the document.\n"
" <b>&lt;topSentenceWeight&gt; 1.0 &lt;/&gt;</b> (default is 1.0)\n"
"\n"
" # Do not weight more than this words in the first sentence.\n"
" # Used for news articles.\n"
" # This only applies to the body of the document.\n"
" <b>&lt;maxWordsInSentence&gt; 0 &lt;/&gt;</b> (default is 0)\n"
"\n"
" # For the body, we turn spam detection on for all URLs, regardless of\n"
" # their quality. This will demote the scores of terms that are repetitious.\n"
" <b>&lt;maxQualityForSpamDetect&gt; 100 &lt;/&gt;</b> \n"
"\n"
" # These are all the same as the &lt;index&gt; tag above this one.\n"
" <b>&lt;minQualityToIndex&gt; 0 &lt;/&gt;</b> \n"
" <b>&lt;filterHtmlEntities&gt; yes &lt;/&gt;</b> \n"
" <b>&lt;indexIfUniqueOnly&gt; no &lt;/&gt;</b> \n"
" <b>&lt;indexSingletons&gt; yes &lt;/&gt;</b> \n"
" <b>&lt;indexPhrases&gt; yes &lt;/&gt;</b> \n"
" <b>&lt;indexAsWhole&gt; no &lt;/&gt;</b> \n"
" <b>&lt;useStopWords&gt; yes &lt;/&gt;</b> \n"
" <b>&lt;useStems&gt; no &lt;/&gt;</b> \n"
"\n"
" # Map the URL's quality to a maximum length (in characters) of the body.\n"
" # This length does not include tags. Some tags, like &lt;br&gt; are \n"
" # converted into \\n\\n, but most are not. Words whose characters occur \n"
" # passed the maximum length will not be indexed. Read more about <a href=\"#qualitysection\">quality</a> or\n"
" # <a href=\"#map\">maps</a>.\n"
//" # You will still be limited by the \"#define MAX_WORDS 10000\"\n"
//" # statement, but this is slated to disappear soon.\n"
" <b>&lt;quality11&gt; 15 &lt;/&gt;</b>\n"
" <b>&lt;quality12&gt; 30 &lt;/&gt;</b>\n"
" <b>&lt;quality13&gt; 45 &lt;/&gt;</b>\n"
" <b>&lt;quality14&gt; 60 &lt;/&gt;</b>\n"
" <b>&lt;quality15&gt; 80 &lt;/&gt;</b>\n"
" <b>&lt;maxLen11&gt; 80000 &lt;/&gt;</b>\n"
" <b>&lt;maxLen12&gt; 100000 &lt;/&gt;</b>\n"
" <b>&lt;maxLen13&gt; 100000 &lt;/&gt;</b>\n"
" <b>&lt;maxLen14&gt; 100000 &lt;/&gt;</b>\n"
" <b>&lt;maxLen15&gt; 100000 &lt;/&gt;</b>\n"
"\n"
" # Map the URL's quality to a maximum score for indexing the terms in the\n"
" # body. 100%% is the maximum 'maximum score'. You cannot exceed 100%% ever.\n"
" <b>&lt;quality21&gt; 15 &lt;/&gt;</b>\n"
" <b>&lt;quality22&gt; 30 &lt;/&gt;</b>\n"
" <b>&lt;quality23&gt; 45 &lt;/&gt;</b>\n"
" <b>&lt;quality24&gt; 60 &lt;/&gt;</b>\n"
" <b>&lt;quality25&gt; 80 &lt;/&gt;</b>\n"
" <b>&lt;maxScore21&gt; 30 &lt;/&gt;</b>\n"
" <b>&lt;maxScore22&gt; 45 &lt;/&gt;</b>\n"
" <b>&lt;maxScore23&gt; 60 &lt;/&gt;</b>\n"
" <b>&lt;maxScore24&gt; 80 &lt;/&gt;</b>\n"
" <b>&lt;maxScore25&gt; 100 &lt;/&gt;</b>\n"
"\n"
" # Map the URL's quality to a percentage score boost for the terms in the\n"
" # body. This boost is multiplied by the score of each term indexed.\n"
" <b>&lt;quality31&gt; 15 &lt;/&gt;</b>\n"
" <b>&lt;quality32&gt; 30 &lt;/&gt;</b>\n"
" <b>&lt;quality33&gt; 45 &lt;/&gt;</b>\n"
" <b>&lt;quality34&gt; 60 &lt;/&gt;</b>\n"
" <b>&lt;quality35&gt; 80 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight31&gt; 60 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight32&gt; 100 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight33&gt; 150 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight34&gt; 200 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight35&gt; 250 &lt;/&gt;</b>\n"
"\n"
" # Map the length of the body (in characters) to a percentage score boost for\n"
" # the terms in the body. This boost is multiplied by the score of each term \n"
" # indexed. This length does not include tags. Some tags, like &lt;br&gt; are\n"
" # converted into \n\n, but most are not.\n"
" # This is now obsolete for newer documents. Please use the numWords map \n"
" # immediately following. It supports unicode better, too.\n"
" #<b>&lt;len41&gt; 100 &lt;/&gt;</b>\n"
" #<b>&lt;len42&gt; 500 &lt;/&gt;</b>\n"
" #<b>&lt;len43&gt; 1000 &lt;/&gt;</b>\n"
" #<b>&lt;len44&gt; 2000 &lt;/&gt;</b>\n"
" #<b>&lt;len45&gt; 5000 &lt;/&gt;</b>\n"
" #<b>&lt;len46&gt; 10000 &lt;/&gt;</b>\n"
" #<b>&lt;len47&gt; 20000 &lt;/&gt;</b>\n"
" #<b>&lt;len48&gt; 50000 &lt;/&gt;</b>\n"
" #<b>&lt;scoreWeight41&gt; 300 &lt;/&gt;</b>\n"
" #<b>&lt;scoreWeight42&gt; 250 &lt;/&gt;</b> \n"
" #<b>&lt;scoreWeight43&gt; 200 &lt;/&gt;</b> \n"
" #<b>&lt;scoreWeight44&gt; 150 &lt;/&gt;</b> \n"
" #<b>&lt;scoreWeight45&gt; 100 &lt;/&gt;</b> \n"
" #<b>&lt;scoreWeight46&gt; 80 &lt;/&gt;</b> \n"
" #<b>&lt;scoreWeight47&gt; 60 &lt;/&gt;</b> \n"
" #<b>&lt;scoreWeight48&gt; 40 &lt;/&gt;</b> \n"
"\n"
" # Map the number of words to a percentage score boost for the terms in \n"
" # the body. This boost is multiplied by the score of each term \n"
" # indexed.\n"
" <b>&lt;numWords41&gt; 20 &lt;/&gt;</b>\n"
" <b>&lt;numWords42&gt; 100 &lt;/&gt;</b>\n"
" <b>&lt;numWords43&gt; 200 &lt;/&gt;</b>\n"
" <b>&lt;numWords44&gt; 400 &lt;/&gt;</b>\n"
" <b>&lt;numWords45&gt; 1000 &lt;/&gt;</b>\n"
" <b>&lt;numWords46&gt; 2000 &lt;/&gt;</b>\n"
" <b>&lt;numWords47&gt; 4000 &lt;/&gt;</b>\n"
" <b>&lt;numWords48&gt; 10000 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight41&gt; 300 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight42&gt; 250 &lt;/&gt;</b> \n"
" <b>&lt;scoreWeight43&gt; 200 &lt;/&gt;</b> \n"
" <b>&lt;scoreWeight44&gt; 150 &lt;/&gt;</b> \n"
" <b>&lt;scoreWeight45&gt; 100 &lt;/&gt;</b> \n"
" <b>&lt;scoreWeight46&gt; 80 &lt;/&gt;</b> \n"
" <b>&lt;scoreWeight47&gt; 60 &lt;/&gt;</b> \n"
" <b>&lt;scoreWeight48&gt; 40 &lt;/&gt;</b> \n"
"\n"
" # Map the URL's quality to a maximum score for indexing the terms in the\n"
" # body. 100%% is the maximum 'maximum score'. You cannot exceed 100%% ever.\n"
" <b>&lt;len51&gt; 100 &lt;/&gt;</b>\n"
" <b>&lt;len52&gt; 500 &lt;/&gt;</b>\n"
" <b>&lt;len53&gt; 1000 &lt;/&gt;</b>\n"
" <b>&lt;len54&gt; 2000 &lt;/&gt;</b>\n"
" <b>&lt;len55&gt; 5000 &lt;/&gt;</b>\n"
" <b>&lt;maxScore51&gt; 30 &lt;/&gt;</b>\n"
" <b>&lt;maxScore52&gt; 45 &lt;/&gt;</b>\n"
" <b>&lt;maxScore53&gt; 60 &lt;/&gt;</b>\n"
" <b>&lt;maxScore54&gt; 80 &lt;/&gt;</b>\n"
" <b>&lt;maxScore55&gt; 100 &lt;/&gt;</b>\n"
"\n"
"<b>&lt;/index&gt;</b>\n"
"\n"
"# This one is similar to above, but we're indexing \"title:\" terms.\n"
"# The major difference are in red.\n"
"<b>&lt;index&gt;</b>\n"
" <b>&lt;name&gt; title &lt;/&gt;</b>\n"
" <font color=red><b>&lt;prefix&gt; title &lt;/&gt;</b> # prepend a \"title:\" to the term before indexing</font>\n"
" <b>&lt;maxQualityForSpamDetect&gt; 0 &lt;/&gt;</b>\n"
" <b>&lt;minQualityToIndex&gt; 0 &lt;/&gt;</b>\n"
" <b>&lt;filterHtmlEntities&gt; yes &lt;/&gt;</b>\n"
"\n"
" # This tells Gigablast not to index a word or phrase if it has already been\n"
" # indexed. This means that repeating terms in the title will have no affect.\n"
" <font color=red><b>&lt;indexIfUniqueOnly&gt; yes &lt;/&gt;</b></font>\n"
"\n"
" <b>&lt;indexSingletons&gt; yes &lt;/&gt;</b>\n"
" <b>&lt;indexPhrases&gt; yes &lt;/&gt;</b>\n"
" <b>&lt;indexAsWhole&gt; no &lt;/&gt;</b>\n"
" <b>&lt;useStopWords&gt; yes &lt;/&gt;</b>\n"
" <b>&lt;useStems&gt; no &lt;/&gt;</b>\n"
"\n"
" # Map URL's quality to a maximum length for this field.\n"
" <b>&lt;quality11&gt; 15 &lt;/&gt;</b>\n"
" <b>&lt;quality12&gt; 30 &lt;/&gt;</b>\n"
" <b>&lt;quality13&gt; 45 &lt;/&gt;</b>\n"
" <b>&lt;quality14&gt; 60 &lt;/&gt;</b>\n"
" <b>&lt;quality15&gt; 80 &lt;/&gt;</b>\n"
" <b>&lt;maxLen11&gt; 80000 &lt;/&gt;</b>\n"
" <b>&lt;maxLen12&gt; 100000 &lt;/&gt;</b>\n"
" <b>&lt;maxLen13&gt; 150000 &lt;/&gt;</b>\n"
" <b>&lt;maxLen14&gt; 200000 &lt;/&gt;</b>\n"
" <b>&lt;maxLen15&gt; 250000 &lt;/&gt;</b>\n"
"\n"
" # Map URL's quality to a maximum score for terms in this field.\n"
" <b>&lt;quality21&gt; 15 &lt;/&gt;</b>\n"
" <b>&lt;quality22&gt; 30 &lt;/&gt;</b>\n"
" <b>&lt;quality23&gt; 45 &lt;/&gt;</b>\n"
" <b>&lt;quality24&gt; 60 &lt;/&gt;</b>\n"
" <b>&lt;quality25&gt; 80 &lt;/&gt;</b>\n"
" <b>&lt;maxScore21&gt; 30 &lt;/&gt;</b>\n"
" <b>&lt;maxScore22&gt; 45 &lt;/&gt;</b>\n"
" <b>&lt;maxScore23&gt; 60 &lt;/&gt;</b>\n"
" <b>&lt;maxScore24&gt; 80 &lt;/&gt;</b>\n"
" <b>&lt;maxScore25&gt; 100 &lt;/&gt;</b>\n"
"\n"
" # Map URL's quality to a percentage score boost for terms in this field.\n"
" <b>&lt;quality31&gt; 15 &lt;/&gt;</b>\n"
" <b>&lt;quality32&gt; 30 &lt;/&gt;</b>\n"
" <b>&lt;quality33&gt; 45 &lt;/&gt;</b>\n"
" <b>&lt;quality34&gt; 60 &lt;/&gt;</b>\n"
" <b>&lt;quality35&gt; 80 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight31&gt; 60 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight32&gt; 100 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight33&gt; 150 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight34&gt; 200 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight35&gt; 250 &lt;/&gt;</b>\n"
"\n"
" # Map the field's length to a percentage score boost for terms in this \n"
" # field.\n"
" <b>&lt;len41&gt; 100 &lt;/&gt;</b>\n"
" <b>&lt;len42&gt; 500 &lt;/&gt;</b>\n"
" <b>&lt;len43&gt; 1000 &lt;/&gt;</b>\n"
" <b>&lt;len44&gt; 2000 &lt;/&gt;</b>\n"
" <b>&lt;len45&gt; 5000 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight41&gt; 300 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight42&gt; 200 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight43&gt; 150 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight44&gt; 100 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight45&gt; 50 &lt;/&gt;</b>\n"
"\n"
" # Map the field's length to a maximum score for terms in this field.\n"
" <b>&lt;len51&gt; 100 &lt;/&gt;</b>\n"
" <b>&lt;len52&gt; 500 &lt;/&gt;</b>\n"
" <b>&lt;len53&gt; 1000 &lt;/&gt;</b>\n"
" <b>&lt;len54&gt; 2000 &lt;/&gt;</b>\n"
" <b>&lt;len55&gt; 5000 &lt;/&gt;</b>\n"
" <b>&lt;maxScore51&gt; 30 &lt;/&gt;</b>\n"
" <b>&lt;maxScore52&gt; 45 &lt;/&gt;</b>\n"
" <b>&lt;maxScore53&gt; 60 &lt;/&gt;</b>\n"
" <b>&lt;maxScore54&gt; 80 &lt;/&gt;</b>\n"
" <b>&lt;maxScore55&gt; 100 &lt;/&gt;</b>\n"
"\n"
"<b>&lt;/index&gt;</b>\n"
"<a name=\"rsmetas\"></a>\n"
"# Now this one is for all the standard, supported meta tags.\n"
"# Terms in these tags have not been indexed yet, but we do that here.\n"
"<b>&lt;index&gt;</b>\n"
"\n"
" # Gigablast allows multiple fields/parts to be specified for indexing\n"
" # under the same parameters. In this case, we treat the meta summary,\n"
" # meta description and meta keywords tags all equally.\n"
" <font color=red><b>&lt;name&gt; meta.summary &lt;/&gt;</b>\n"
" <b>&lt;name&gt; meta.description &lt;/&gt;</b> \n"
" <b>&lt;name&gt; meta.keywords &lt;/&gt;</b></font>\n"
"\n"
"\n"
" <b>&lt;maxQualityForSpamDetect&gt; 0 &lt;/&gt;</b>\n"
" <b>&lt;minQualityToIndex&gt; 0 &lt;/&gt;</b>\n"
" <b>&lt;filterHtmlEntities&gt; yes &lt;/&gt;</b>\n"
"\n"
" # This tells Gigablast not to index a word or phrase if it has already been\n"
" # indexed. This means that repeating terms in these meta tags will have no\n"
" # affect.\n"
" <font color=red><b>&lt;indexIfUniqueOnly&gt; yes &lt;/&gt;</b></font>\n"
"\n"
" <b>&lt;indexSingletons&gt; yes &lt;/&gt;</b>\n"
" <b>&lt;indexPhrases&gt; yes &lt;/&gt;</b>\n"
" <b>&lt;indexAsWhole&gt; no &lt;/&gt;</b>\n"
" <b>&lt;useStopWords&gt; yes &lt;/&gt;</b>\n"
" <b>&lt;useStems&gt; no &lt;/&gt;</b>\n"
"\n"
" # Map URL's quality to a maximum length for this field.\n"
" <b>&lt;quality11&gt; 15 &lt;/&gt;</b>\n"
" <b>&lt;maxLen11&gt; 200 &lt;/&gt;</b>\n"
"\n"
" # Map URL's quality to a maximum score for terms in this field.\n"
" <b>&lt;quality21&gt; 15 &lt;/&gt;</b>\n"
" <b>&lt;maxScore21&gt; 100 &lt;/&gt;</b>\n"
"\n"
" # Map URL's quality to a percentage score boost for terms in this field.\n"
" <b>&lt;quality31&gt; 15 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight31&gt; 100 &lt;/&gt;</b>\n"
"\n"
" # Map the field's length to a percentage score boost for terms in this \n"
" # field.\n"
" <b>&lt;len41&gt; 100 &lt;/&gt;</b>\n"
" <b>&lt;scoreWeight41&gt; 100 &lt;/&gt;</b>\n"
"\n"
" # Map the field's length to a maximum score for terms in this field.\n"
" <b>&lt;len51&gt; 100 &lt;/&gt;</b>\n"
" <b>&lt;maxScore51&gt; 100 &lt;/&gt;</b>\n"
"\n"
"<b>&lt;/index&gt;</b>\n"
"</pre>\n"
"</b>\n"
"\n"
*/
/*
"<br>\n"
"\n"
"<a name=gbstart></>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>gbstart</font>\n"
"</td></tr></table>\n"
"<br><br>\n"
"This simple script is used to start up all the gb hosts (processes) native to a particular computer. It also redirects the gb programs standard error to a log file. Notice that the gb executable takes the gb.conf filename as its first argument."
"<br><br>\n"
"<pre>\n"
"#!/bin/bash\n"
"# move the old log file\n"
"mv /workdir/loga /workdir/loga-`date '+%%Y_%%m_%%d-%%H:%%M:%%S'`.log\n"
"# start up gb\n"
"/workdir/gb -c /workdir/hosts.conf >& /workdir/loga &\n"
"</pre>\n"
"\n"
"<br>\n"
*/
"<br>\n"
"\n"
""
""
""
""
""
""
""
""
""
"<a name=stopwords></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Stop Words</center>\n"
"</td></tr></table>\n"
"<br>\n"
"<pre>\n"
"at be by of on\n"
"or do he if is\n"
"it in me my re\n"
"so to us vs we\n"
"the and are can did\n"
"per for had has her\n"
"him its not our she\n"
"you also been from have\n"
"here hers ours that them\n"
"then they this were will\n"
"with your about above ain\n"
"could isn their there these\n"
"those would yours theirs aren\n"
"hadn didn hasn ll ve \n"
"should shouldn\n"
"</pre>\n"
"<br>\n"
"<br>\n"
"\n"
"<a name=phrasebreaks></a>\n"
"<table cellpadding=1 border=0 width=100%% bgcolor=#0079ba>\n"
"<tr><td><center><b><font color=#ffffff size=+1>Phrase Breaks</center>\n"
"</td></tr></table>\n"
"<br>\n"
"Certain punctuation breaks up a phrase. All single character punctuation marks can be phrased across, with the exception of the following:\n"
"<table border=1 cellpadding=6><tr><td colspan=11><b>Breaking Punctuation (1 char)</b></td></tr>\n"
"<tr>\n"
"<td>?</td><td>!</td><td>;</td><td>{</td><td>}</td><td>&lt;</td><td>&gt;</td><td>171</td><td>187</td><td>191</td><td>161</td></tr></table>\n"
"<br><br>\n"
"The following 2 character punctuation sequences break phrases:\n"
"<table border=1 cellpadding=6><tr><td colspan=12><b>Breaking Punctuation (2 chars)( _ = whitespace = \\t, \\n, \\r or \\0x20)</b></td></tr>\n"
"<tr><td>?_</td><td>!_</td><td>;_</td><td>{_</td><td>}_</td><td>&lt;_</td><td>&gt;_</td><td>171_</td><td>187_</td><td>191_</td><td>161_</td><td>_.</td></tr>\n"
"<tr><td>_?</td><td>_!</td><td>_;</td><td>_{</td><td>_}</td><td>_&lt;</td><td>_&gt;</td><td>_171</td><td>_187</td><td>_191</td><td>_161</td><td>_.</td></tr>\n"
"<tr><td colspan=12>Any 2 character combination with NO whitespaces with the exception of \"<b>/~</b>\"</td></tr>\n"
"</table>\n"
"<br><br>\n"
"\n"
"All 3 character sequences of punctuation break phrases with the following exceptions:\n"
"<table border=1 cellpadding=6><tr><td colspan=12><b><u>NON</u>-Breaking Punctuation (3 chars)( _ = whitespace = \\t, \\n, \\r or \\0x20)</b></td></tr>\n"
"<tr><td>://</td><td>___</td><td>_,_</td><td>_-_</td><td>_+_</td><td>_&amp;_</td></tr>\n"
"</table>\n"
"<br><br>\n"
"\n"
"All sequences of punctuation greater than 3 characters break phrases with the sole exception being a sequence of strictly whitespaces.\n"
"\n"
"<br><br>\n"
"<br>\n"
"<center>\n"
"<font size=-1>\n"
"<b>\n"
"<a href=/products.html>products</a> &nbsp; &nbsp;\n"
"<a href=/help.html>help</a> &nbsp; &nbsp;\n"
"<a href=/addurl>add a url</a> &nbsp; &nbsp;\n"
"<a href=/about.html>about</a> &nbsp; &nbsp;\n"
"<a href=/contact.html>contact</a>\n"
"</b>\n"
"</font>\n"
"</html>\n"
"\n");
//if ( user == USER_MASTER ) p += gbstrlen(p);
if ( g_users.hasPermission( r, PAGE_MASTER ) ) p += gbstrlen(p);
*p = '\0';
long bufLen = gbstrlen(buf);
return g_httpServer.sendDynamicPage ( s , buf , bufLen );
}