private static void HandleSignal(int signal)
{
    Logger.Log.Debug("Handling signal {0} ({1})", signal, (Mono.Unix.Native.Signum) signal);

    // Pass the signals to the helper too.
    GLib.Idle.Add(new GLib.IdleHandler(delegate() {
        RemoteIndexer.SignalRemoteIndexer((Mono.Unix.Native.Signum) signal);
        return false;
    }));

    // If we get SIGUSR1, turn the debugging level up.
    if ((Mono.Unix.Native.Signum) signal == Mono.Unix.Native.Signum.SIGUSR1) {
        LogLevel old_level = Log.Level;
        Log.Level = LogLevel.Debug;
        Log.Debug("Moving from log level {0} to Debug", old_level);
        return;
    } else if ((Mono.Unix.Native.Signum) signal == Mono.Unix.Native.Signum.SIGUSR2) {
        // Debugging hook for beagrepd
        QueryDriver.DebugHook();
        LuceneCommon.DebugHook();
        return;
    }

    Logger.Log.Debug("Initiating shutdown in response to signal.");
    Shutdown.BeginShutdown();
}
private void OnQueryDriverChanged(Queryable queryable, IQueryableChangeData change_data)
{
    if (this.result != null) {
        QueryDriver.DoOneQuery(queryable, this.query, this.result, change_data);
    }
}
public override ResponseMessage Execute(RequestMessage request)
{
    DaemonInformationResponse response = new DaemonInformationResponse();
    DaemonInformationRequest req = (DaemonInformationRequest) request;

    if (req.GetVersion) {
        response.Version = ExternalStringsHack.Version;
    }

    if (req.GetSchedInfo) {
        response.SchedulerInformation = Scheduler.Global.GetCurrentStatus();
    }

    if (req.GetIndexStatus) {
        response.IndexStatus = new ArrayList();
        foreach (QueryableStatus status in QueryDriver.GetIndexInformation()) {
            response.IndexStatus.Add(status);
        }
    }

    if (req.GetIsIndexing) {
        response.IsIndexing = QueryDriver.IsIndexing;
    }

    return response;
}
public override ResponseMessage Execute(RequestMessage req)
{
    RemovableIndexRequest r = (RemovableIndexRequest) req;

    bool to_mount = r.Mount;
    string index_dir = r.IndexDir;
    string mnt_dir = r.MountDir;

    return QueryDriver.HandleRemovableIndexRequest(to_mount, index_dir, mnt_dir);
}
//////////////////////////////////////////////////////////////////////////////////

protected void AddIndexable(Indexable indexable)
{
    indexable.Source = QueryDriver.GetQueryable(this).Name;

    lock (request_lock)
        pending_request.Add(indexable);

    // Schedule a final flush every time we add anything.
    // Better safe than sorry.
    ScheduleFinalFlush();
}
public override ResponseMessage Execute(RequestMessage request)
{
    CountMatchQueryResponse result = new CountMatchQueryResponse();

    CountMatchQuery query = request as CountMatchQuery;
    if (query == null) {
        return new ErrorResponse("Only CountMatch query please!");
    }

    result.NumMatches = QueryDriver.DoCountMatchQuery(query);

    return result;
}
public override ResponseMessage Execute(RequestMessage req)
{
    this.query = (Query) req;
    this.result = new QueryResult();
    this.result.IsIndexListener = this.query.IsIndexListener;

    AttachResult();

    QueryDriver.ChangedEvent += OnQueryDriverChanged;
    QueryDriver.DoQuery(query, this.result,
                        new RequestMessageExecutor.AsyncResponse(this.SendAsyncResponse));

    // Don't send a response; we'll be sending them async
    return null;
}
public ISnippetReader GetSnippet(string[] query_terms, Hit hit, bool full_text, int ctx_length, int snp_length)
{
    if (hit == null) {
        return null;
    }

    // Sanity-check: make sure this Hit actually came out of this Queryable
    if (QueryDriver.GetQueryable(hit.Source) != this) {
        string msg = String.Format("Queryable mismatch in GetSnippet: {0} vs {1}", hit.Source, this);
        throw new Exception(msg);
    }

    try {
        return iqueryable.GetSnippet(query_terms, hit, full_text, ctx_length, snp_length);
    } catch (Exception ex) {
        Logger.Log.Warn(ex, "Caught exception calling GetSnippet on '{0}'", Name);
    }

    return null;
}
public override ResponseMessage Execute(RequestMessage req)
{
    SnippetRequest request = (SnippetRequest) req;
    Queryable queryable = QueryDriver.GetQueryable(request.Hit.Source);

    ISnippetReader snippet_reader;
    bool full_text = request.FullText;
    int ctx_length = request.ContextLength;
    int snp_length = request.SnippetLength;

    if (queryable == null) {
        Log.Error("SnippetExecutor: No queryable object matches '{0}'", request.Hit.Source);
        snippet_reader = new SnippetReader(null, null, false, -1, -1);
        full_text = false;
    } else {
        snippet_reader = queryable.GetSnippet(request.QueryTerms, request.Hit, full_text, ctx_length, snp_length);
    }

    return new SnippetResponse(new SnippetList(full_text, snippet_reader));
}
public static void DoMain(string[] args)
{
    SystemInformation.InternalCallInitializer.Init();
    SystemInformation.SetProcessName("beagrepd");

    // Process the command-line arguments
    bool arg_debug = false;
    bool arg_debug_memory = false;
    bool arg_fg = false;

    int i = 0;
    while (i < args.Length) {

        string arg = args [i];
        ++i;
        string next_arg = i < args.Length ? args [i] : null;

        switch (arg) {
        case "-h":
        case "--help":
            PrintUsage();
            Environment.Exit(0);
            break;

        case "--mdb":
        case "--mono-debug":
            // Silently ignore these arguments: they get handled
            // in the wrapper script.
            break;

        case "--list-backends":
            Console.WriteLine("Current available backends:");
            Console.Write(QueryDriver.ListBackends());
            Environment.Exit(0);
            break;

        case "--fg":
        case "--foreground":
            arg_fg = true;
            break;

        case "--bg":
        case "--background":
            arg_fg = false;
            break;

        case "--replace":
            arg_replace = true;
            break;

        case "--debug":
            arg_debug = true;
            break;

        case "--heap-shot":
            arg_heap_shot = true;
            arg_debug = true;
            arg_debug_memory = true;
            break;

        case "--no-snapshots":
        case "--no-snapshot":
            arg_heap_shot_snapshots = false;
            break;

        case "--heap-buddy":
        case "--debug-memory":
            arg_debug = true;
            arg_debug_memory = true;
            break;

        case "--indexing-test-mode":
            arg_indexing_test_mode = true;
            arg_fg = true;
            break;

        case "--backend":
            if (next_arg == null) {
                Console.WriteLine("--backend requires a backend name");
                Environment.Exit(1);
                break;
            }

            if (next_arg.StartsWith("--")) {
                Console.WriteLine("--backend requires a backend name. Invalid name '{0}'", next_arg);
                Environment.Exit(1);
                break;
            }

            if (next_arg [0] != '+' && next_arg [0] != '-') {
                QueryDriver.OnlyAllow(next_arg);
            } else {
                if (next_arg [0] == '+') {
                    QueryDriver.Allow(next_arg.Substring(1));
                } else {
                    QueryDriver.Deny(next_arg.Substring(1));
                }
            }

            ++i; // we used next_arg
            break;

        case "--add-static-backend":
            if (next_arg != null) {
                QueryDriver.AddStaticQueryable(next_arg);
            }
            ++i;
            break;

        case "--disable-scheduler":
            arg_disable_scheduler = true;
            break;

        case "--indexing-delay":
            if (next_arg != null) {
                try {
                    QueryDriver.IndexingDelay = Int32.Parse(next_arg);
                } catch {
                    Console.WriteLine("'{0}' is not a valid number of seconds", next_arg);
                    Environment.Exit(1);
                }
            }
            ++i;
            break;

        case "--autostarted":
            // FIXME: This option is deprecated and will be removed in a future release.
            break;

        case "--disable-text-cache":
            disable_textcache = true;
            break;

        case "--version":
            VersionFu.PrintVersion();
            Environment.Exit(0);
            break;

        default:
            Console.WriteLine("Unknown argument '{0}'", arg);
            Environment.Exit(1);
            break;
        }
    }

    if (Environment.GetEnvironmentVariable("SABAYON_SESSION_RUNNING") == "yes") {
        Console.WriteLine("Beagrep is running underneath Sabayon, exiting.");
        Environment.Exit(0);
    }

    if (arg_indexing_test_mode) {
        LuceneQueryable.OptimizeRightAway = true;
    }

    // Bail out if we are trying to run as root
    if (Environment.UserName == "root" && Environment.GetEnvironmentVariable("SUDO_USER") != null) {
        Console.WriteLine("You appear to be running beagrep using sudo. This can cause problems with");
        Console.WriteLine("permissions in your .beagrep and .wapi directories if you later try to run");
        Console.WriteLine("as an unprivileged user. If you need to run beagrep as root, please use");
        Console.WriteLine("'su -c' instead.");
        Environment.Exit(-1);
    }

    if (Environment.UserName == "root" && !Conf.Daemon.GetOption(Conf.Names.AllowRoot, false)) {
        Console.WriteLine("You can not run beagrep as root. Beagrep is designed to run from your own");
        Console.WriteLine("user account. If you want to create multiuser or system-wide indexes, use");
        Console.WriteLine("the beagrep-build-index tool.");
        Console.WriteLine();
        Console.WriteLine("You can override this setting using the beagrep-config or beagrep-settings tools.");
        Environment.Exit(-1);
    }

    try {
        string tmp = PathFinder.HomeDir;
    } catch (Exception e) {
        Console.WriteLine("Unable to start the daemon: {0}", e.Message);
        Environment.Exit(-1);
    }

    MainLoopThread = Thread.CurrentThread;

    // FIXME: We always turn on full debugging output! We are still
    // debugging this code, after all...
    // arg_debug ? LogLevel.Debug : LogLevel.Warn
    Log.Initialize(PathFinder.LogDir, "Beagrep", LogLevel.Debug, arg_fg);

    Log.Always("Starting Beagrep Daemon (version {0})", ExternalStringsHack.Version);
    Log.Always("Running on {0}", SystemInformation.MonoRuntimeVersion);
    Log.Always("Command Line: {0}", Environment.CommandLine != null ? Environment.CommandLine : "(null)");

    if (!ExtendedAttribute.Supported) {
        Logger.Log.Warn("Extended attributes are not supported on this filesystem. " +
                        "Performance will suffer as a result.");
    }

    if (disable_textcache) {
        Log.Warn("Running with text-cache disabled!");
        Log.Warn("*** Snippets will not be returned for documents indexed in this session.");
    }

    // Check if global configuration files are installed
    if (!Conf.CheckGlobalConfig()) {
        Console.WriteLine("Global configuration files not found in '{0}'", PathFinder.ConfigDataDir);
        Environment.Exit(-1);
    }

    // Start our memory-logging thread
    if (arg_debug_memory) {
        ExceptionHandlingThread.Start(new ThreadStart(LogMemoryUsage));
    }

    // Do BEAGREP_EXERCISE_THE_DOG_HARDER-related processing.
    ExerciseTheDogHarder();

    // Initialize GObject type system
    g_type_init();

    // Lower our CPU priority
    SystemPriorities.Renice(7);

    QueryDriver.Init();
    Server.Init();

#if MONO_1_9
    Shutdown.SetupSignalHandlers(new Shutdown.SignalHandler(HandleSignal));
#else
    SetupSignalHandlers();
#endif

    Shutdown.ShutdownEvent += OnShutdown;

    main_loop = new MainLoop();
    Shutdown.RegisterMainLoop(main_loop);

    // Defer all actual startup until the main loop is
    // running. That way shutdowns during the startup
    // process work correctly.
    GLib.Idle.Add(new GLib.IdleHandler(StartupProcess));

    // Start our event loop.
    main_loop.Run();

    // We're out of the main loop now, join all the
    // running threads so we can exit cleanly.
    ExceptionHandlingThread.JoinAllThreads();

    // If we placed our sockets in a temp directory, try to clean it up
    // Note: this may fail because the helper is still running
    if (PathFinder.GetRemoteStorageDir(false) != PathFinder.StorageDir) {
        try {
            Directory.Delete(PathFinder.GetRemoteStorageDir(false));
        } catch (IOException) { }
    }

    Log.Always("Beagrep daemon process shut down cleanly.");
}
public static bool StartupProcess()
{
    // Profile our initialization
    Stopwatch stopwatch = new Stopwatch();
    stopwatch.Start();

    // Fire up our server
    if (!StartServer()) {
        if (!arg_replace) {
            Logger.Log.Error("Could not set up the listener for beagrep requests. " +
                             "There is probably another beagrepd instance running. " +
                             "Use --replace to replace the running service");
            Environment.Exit(1);
        }

        ReplaceExisting();
    }

    // Set up out-of-process indexing
    LuceneQueryable.IndexerHook = new LuceneQueryable.IndexerCreator(RemoteIndexer.NewRemoteIndexer);

    Config config = Conf.Get(Conf.Names.DaemonConfig);

    // Initialize synchronization to keep the indexes local if PathFinder.StorageDir
    // is on a non-block device, or if BEAGREP_SYNCHRONIZE_LOCALLY is set
    if ((!SystemInformation.IsPathOnBlockDevice(PathFinder.StorageDir) &&
         config.GetOption(Conf.Names.IndexSynchronization, true)) ||
        Environment.GetEnvironmentVariable("BEAGREP_SYNCHRONIZE_LOCALLY") != null) {
        IndexSynchronization.Initialize();
    }

    // Start the query driver.
    Logger.Log.Debug("Starting QueryDriver");
    QueryDriver.Start();

    // Start our battery monitor so we can shut down the
    // scheduler if needed.
    BatteryMonitor.Init();

    bool initially_on_battery = !BatteryMonitor.UsingAC && !config.GetOption(Conf.Names.IndexOnBattery, false);

    // Start the Global Scheduler thread
    if (!arg_disable_scheduler) {
        if (!initially_on_battery) {
            Logger.Log.Debug("Starting Scheduler thread");
            Scheduler.Global.Start();
        } else {
            Log.Debug("Beagrep started on battery, not starting scheduler thread");
        }
    }

    // Start our Inotify threads
    Inotify.Start();

    // Test if the FileAdvise stuff is working: This will print a
    // warning if not. The actual advice calls will fail silently.
    FileAdvise.TestAdvise();

#if ENABLE_AVAHI
    zeroconf = new Beagrep.Daemon.Network.Zeroconf();
#endif

    Conf.WatchForUpdates();

    stopwatch.Stop();

    Logger.Log.Debug("Daemon initialization finished after {0}", stopwatch);

    SystemInformation.LogMemoryUsage();

    if (arg_indexing_test_mode) {
        Thread.Sleep(1000); // Ugly paranoia: wait a second for the backends to settle.
        Logger.Log.Debug("Running in indexing test mode");
        Scheduler.Global.EmptyQueueEvent += OnEmptySchedulerQueue;
        Scheduler.Global.Add(null); // pulse the scheduler
    }

    return false;
}
protected void Flush(bool continue_only)
{
    IndexerRequest flushed_request;

    if (continue_only) {
        // If the request is merely to signal IndexHelper to continue indexing, then send
        // a fake IndexerRequest, but use the previous request to retrieve the deferred
        // indexables.
        flushed_request = new IndexerRequest();
        flushed_request.ContinueIndexing = true;

        // Do not pass this through PreFlushHook since this is a fake request
    } else {
        lock (request_lock) {
            if (pending_request.IsEmpty) {
                return;
            }

            flushed_request = pending_request;
            pending_request = new IndexerRequest();

            // We hold the request_lock when calling PreFlushHook, so
            // that no other requests can come in until it exits.
            PreFlushHook(flushed_request);
        }
    }

    IndexerReceipt [] receipts;
    receipts = indexer.Flush(flushed_request);

    PostFlushHook(flushed_request, receipts);

    if (continue_only) {
        flushed_request = pending_request;
    }

    // Silently return if we get a null back. This is probably
    // a bad thing to do. If IndexHelper is shut down because of
    // memory blowup or has crashed, then null is returned. Silently
    // returning means ignoring the indexables in the IndexHelper's
    // queue (which could be more than what was sent in the last request,
    // since there could be some deferred indexables too).
    if (receipts == null) {
        return;
    }

    // Nothing happened (except maybe an optimize, which does not
    // generate a receipt). Also do nothing.
    if (receipts.Length == 0) {
        return;
    }

    // Update the cached count of items in the driver
    // FIXME: Verify that this still works after all the deferred-indexable fu
    driver.SetItemCount(indexer.GetItemCount());

    // Something happened, so schedule an optimize just in case.
    ScheduleOptimize();

    if (fa_store != null) {
        fa_store.BeginTransaction();
    }

    ArrayList added_uris = new ArrayList();
    ArrayList removed_uris = new ArrayList();
    bool indexer_indexable_receipt = false;

    for (int i = 0; i < receipts.Length; ++i) {

        if (receipts [i] is IndexerAddedReceipt) {

            IndexerAddedReceipt r;
            r = (IndexerAddedReceipt) receipts [i];
            Indexable indexable = flushed_request.RetrieveRequestIndexable(r);
            if (indexable == null) {
                Log.Debug("Should not happen! Previously requested indexable with id #{0} has eloped!", r.Id);
                continue;
            }

            // Add the Uri to the list for our change data
            // *before* doing any post-processing.
            // This ensures that we have internal uris when
            // we are remapping.
            added_uris.Add(indexable.Uri);

            // Call the appropriate hook
            Uri notification_uri = indexable.Uri;
            try {
                // Map from internal->external Uris in the PostAddHook
                notification_uri = PostAddHook(indexable, r);
            } catch (Exception ex) {
                Logger.Log.Warn(ex, "Caught exception in PostAddHook '{0}' '{1}' '{2}'",
                                indexable.Uri, r.FilterName, r.FilterVersion);
            }

            // Every added Uri also needs to be listed as removed,
            // to avoid duplicate hits in the query. Since the
            // removed Uris need to be external Uris, we add them
            // to the list *after* post-processing.
            removed_uris.Add(notification_uri);

        } else if (receipts [i] is IndexerRemovedReceipt) {

            IndexerRemovedReceipt r;
            r = (IndexerRemovedReceipt) receipts [i];

            Indexable indexable = flushed_request.RetrieveRequestIndexable(r);
            if (indexable == null) { // Should never happen
                Log.Warn("Unable to match indexable-remove #{0} to any request!", r.Id);
                continue;
            }

            // Call the appropriate hook
            Uri notification_uri = indexable.Uri;
            try {
                notification_uri = PostRemoveHook(indexable, r.NumRemoved);
            } catch (Exception ex) {
                Logger.Log.Warn(ex, "Caught exception in PostRemoveHook '{0}'", indexable.Uri);
            }

            // If nothing was removed, no need for change notification
            if (r.NumRemoved <= 0) {
                continue;
            }

            // Add the removed Uri to the list for our
            // change data. This will be an external Uri
            // when we are remapping.
            removed_uris.Add(notification_uri);

        } else if (receipts [i] is IndexerIndexablesReceipt) {
            indexer_indexable_receipt = true;
        }
    }

    if (!continue_only) {
        lock (request_lock) {
            pending_request.DeferredIndexables = flushed_request.DeferredIndexables;
        }
    }

    if (indexer_indexable_receipt) {
        Log.Debug("Indexing of indexer generated indexables is paused. Scheduling job to continue.");

        // Create a task asking the indexer to continue indexing
        Scheduler.Task task;
        task = Scheduler.TaskFromHook(new Scheduler.TaskHook(ContinueIndexerIndexableIndexing));

        // Schedule it so that it is the immediate next task to be scheduled
        task.Priority = Scheduler.Priority.Immediate;
        task.SubPriority = 100;
        task.Source = this;
        task.Tag = "Continue indexing generated indexables from " + IndexName;
        ThisScheduler.Add(task);
    }

    if (fa_store != null) {
        fa_store.CommitTransaction();
    }

    // Propagate the change notification to any open queries.
    if (added_uris.Count > 0 || removed_uris.Count > 0) {
        ChangeData change_data;
        change_data = new ChangeData();
        change_data.AddedUris = added_uris;
        change_data.RemovedUris = removed_uris;

        QueryDriver.QueryableChanged(this, change_data);
    }
}