//performance stat hacking
public void set_last_performance_stat_to_character(CharacterIndex aChar)
{
    //clear out older ages
    while (mPerformanceStats.Count > 0 && mPerformanceStats.Last().Character.LevelIndex >= aChar.LevelIndex)
    {
        mSunsetManager.remove_last_character();
        mPerformanceStats.RemoveAt(mPerformanceStats.Count - 1);
    }

    //fill in younger ages randomly
    while (mPerformanceStats.Count > 0 && mPerformanceStats.Last().Character.LevelIndex < aChar.LevelIndex - 1)
    {
        PerformanceStats stat = new PerformanceStats(new CharacterIndex(mPerformanceStats.Last().Character.LevelIndex + 1, Random.Range(0, 4)));
        if (GameConstants.allPerfectOnSkip)
        {
            stat.update_score(0, 1);
            stat.update_score(1, 1);
        }
        else
        {
            stat.update_score(0, Random.value);
            stat.update_score(1, Random.value);
        }
        stat.Stats = mManager.mGameManager.CharacterHelper.Characters[stat.Character];
        mPerformanceStats.Add(stat);
        mSunsetManager.add_character(stat.Character, !stat.BadPerformance, false);
    }

    PerformanceStats realStat = new PerformanceStats(aChar);
    realStat.Stats = mManager.mGameManager.CharacterHelper.Characters[realStat.Character];
    mPerformanceStats.Add(realStat);
}
public void Records_ForEmptyCounter_YieldsEmptySingleResultSetForCurrentPeriod()
{
    var sut = new PerformanceStats(TimeSpan.FromMinutes(5), It.IsAny<int>());

    var item = sut.Records.First();

    item.StartTime.Should().BeWithin(TimeSpan.FromMilliseconds(50)).Before(DateTime.Now);
    item.EndTime.Should().BeWithin(TimeSpan.FromMilliseconds(50)).Before(DateTime.Now.AddMinutes(5));
}
public void ResetStats()
{
    combinedStats = new PerformanceStats();
    foreach (ModelBasedValue<int[], actionType> m in models)
    {
        m.ResetStats();
    }
}
public async Task TidyRecords_WhenAddingRecords_ShouldNotOverflowMaxRecordCount()
{
    var sut = new PerformanceStats(TimeSpan.FromMilliseconds(30), 3);

    await Task.Delay(150);
    var records = (Queue<PerformanceRecord>)sut.Records;
    sut.TidyRecords();

    records.Count.Should().Be(3);
}
public override PerformanceStats getStats()
{
    combinedStats.modelAccesses = 0;
    combinedStats.modelUpdates = 0;
    combinedStats.cumulativeReward = models[0].getStats().cumulativeReward;

    foreach (ActionValue<int[], actionType> model in models)
    {
        PerformanceStats thisStats = model.getStats();
        combinedStats.modelAccesses += thisStats.modelAccesses;
        combinedStats.modelUpdates += thisStats.modelUpdates;
    }

    return combinedStats;
}
static IAutomobile CreateAutomobile(string name, string model, string notes, PerformanceStats perfStats, string manufacturerName)
{
    var manufacturer = ManufacturerRepository.Instance.FindAll(m => m.Name == manufacturerName).FirstOrDefault();

    return new Automobile()
    {
        Id = Guid.NewGuid(),
        Manufacturer = manufacturer,
        Model = model,
        Name = name,
        Notes = notes,
        PerfStats = perfStats
    };
}
public async Task MessageProcessed_ShouldGrowQueueIfNewEventIsLaterThanLatestTimePeriod()
{
    var sut = new PerformanceStats(TimeSpan.FromMilliseconds(50), 3);

    sut.MessageProcessed("something arbitrary");
    await Task.Delay(60);
    sut.Records.Should().HaveCount(2);

    await Task.Delay(60);
    sut.MessageProcessed("something arbitrary");
    sut.Records.Should().HaveCount(3);
}
public async Task MessageProcessed_ShouldNotGrowQueueIfNewEventIsWithinTheLatestTimePeriod()
{
    var sut = new PerformanceStats(TimeSpan.FromHours(1), 3);

    await Task.Delay(5);
    sut.MessageProcessed("something arbitrary");
    await Task.Delay(5);
    sut.Records.Should().HaveCount(1);

    sut.MessageProcessed("something arbitrary");
    await Task.Delay(5);
    sut.Records.Should().HaveCount(1);
}
//this gets called during CHOOSE so BB should be full sized
//this gets called by NewGameManager
public void begin_new_character(PerformanceStats aChar)
{
    hide_interface(true);

    var nameFrame = aChar.Character == CharacterIndex.sFetus ? mManager.mCharacterBundleManager.get_image(GameStrings.GetString("NIMIMG1")) :
                    aChar.Character == CharacterIndex.sOneHundred ? mManager.mCharacterBundleManager.get_image(GameStrings.GetString("NIMIMG2")) :
                    mManager.mCharacterBundleManager.get_image("TEXTBOX");
    var scoreFrame = aChar.Character == CharacterIndex.sFetus ? mManager.mCharacterBundleManager.get_image("SCORE-FETUS") :
                     aChar.Character == CharacterIndex.sOneHundred ? mManager.mCharacterBundleManager.get_image("SCORE-110") :
                     mManager.mCharacterBundleManager.get_image("SCORE");

    mBBNameTextFrame.set_new_texture(nameFrame.Image, nameFrame.Data.Size);
    mBBScoreFrame.set_new_texture(scoreFrame.Image, scoreFrame.Data.Size);

    Vector3 textOffset = new Vector3(70, 115, 0) / 2;
    mBBNameTextFrame.HardPosition = mBBMultiplierImage.HardPosition + new Vector3(mBBNameTextFrame.BoundingBox.width, 140, 0) / 2 + textOffset;
    mBBScoreFrame.HardPosition = mBBMultiplierImage.HardPosition + new Vector3(mBBScoreFrame.BoundingBox.width, -140, 0) / 2 + textOffset;

    if (!(aChar.Character == CharacterIndex.sFetus || aChar.Character == CharacterIndex.sOneHundred))
    {
        mBBNameText.Text = aChar.Character.Description.ToUpper() + " (" + aChar.Character.Age.ToString() + ")";

        var origPos = mBBNameTextFrame.SoftPosition - new Vector3(mBBNameTextFrame.BoundingBox.width / 2f, 0, 0);
        float newWidth = mBBNameText.BoundingBox.width + 300;
        mBBNameTextFrame.mImage.pixel_crop(new Rect(0, 0, newWidth, mBBNameTextFrame.mImage.BaseDimension.y));
        mBBNameTextFrame.HardPosition = origPos + new Vector3(newWidth / 2f, 0, 0);

        string[] labelNames = new string[] { "label_easy_BIG", "label_normal_BIG", "label_hard_BIG", "label_extreme_BIG" };
        var diffImage = mManager.mCharacterBundleManager.get_image(labelNames[aChar.Stats.Difficulty]);
        mBBMultiplierImage.set_new_texture(diffImage.Image, diffImage.Data.Size);
        mBBMultiplierImage.HardColor = GameConstants.UiWhite;
    }
    else
    {
        //reset to original pixel crop (for when soft restarting the game)
        mBBNameTextFrame.mImage.pixel_crop(new Rect(0, 0, mBBNameTextFrame.mImage.BaseDimension.x, mBBNameTextFrame.mImage.BaseDimension.y));
        mBBNameText.Text = "";
        mBBMultiplierImage.HardColor = GameConstants.UiWhiteTransparent;
    }
}
void OnGUI()
{
    UnityOnGuiIsRepaint = (Event.current.rawType == EventType.Repaint);

    if (!ComponentsInitialized)
    {
        InitializeComponents();
    }

    if (previousGuiScale != GuiScale)
    {
        previousGuiScale = GuiScale;
        UpdateStateResolutions();
    }

    if (Screen.height != previousResolution.height || Screen.width != previousResolution.width)
    {
        Trace.Log("Screen resolution changed to {0}x{1}", Screen.width, Screen.height);
        previousResolution.width = Screen.width;
        previousResolution.height = Screen.height;
        GuiScaleManager.AutoSetGuiScale();
        UpdateStateResolutions();
    }

    if (GUI.skin != Skin)
    {
        GUI.skin = Skin;
    }

    if (Time.frameCount != lastOnGuiCall)
    {
        // this gets called once per frame.
        PerformanceStatsLastFrame = PerformanceStatsInProgress;
        PerformanceStatsInProgress.Clear();
        PerformanceStatsInProgress.FrameTime = Time.deltaTime;
        PerformanceStatsInProgress.UpdateTime = MeasureExecutionTime(DoUpdate);
        lastOnGuiCall = Time.frameCount;
    }

    PerformanceStatsInProgress.GuiCallRegister.Add(Event.current.ToString());
    PerformanceStatsInProgress.DrawTime += MeasureExecutionTime(DoDraw);
}
public async Task TidyRecords_WhenAddingRecords_ShouldGrowUpToMaximumSize()
{
    var sut = new PerformanceStats(TimeSpan.FromMilliseconds(50), 3);

    await Task.Delay(60);
    sut.MessageProcessed("something arbitrary");
    sut.Records.Should().HaveCount(2);

    await Task.Delay(60);
    sut.MessageProcessed("something arbitrary");
    sut.Records.Should().HaveCount(3);

    sut.MessageProcessed("something arbitrary");
    sut.Records.Should().HaveCount(3);
}
public async Task RecordsAfter_ShouldReturnAllRecordsStartingAfterTheSuppliedDate()
{
    var sut = new PerformanceStats(TimeSpan.FromMilliseconds(30), 3);
    var records = (Queue<PerformanceRecord>)sut.Records;
    records.Clear();

    var first = new PerformanceRecord(It.IsAny<TimeSpan>(), new DateTime(1, 2, 3));
    var second = new PerformanceRecord(It.IsAny<TimeSpan>(), first.StartTime.AddTicks(1));
    records.Enqueue(first);
    records.Enqueue(second);

    var foundRecords = sut.RecordsAfter(first.StartTime);

    foundRecords.Should().BeEquivalentTo(new[] { second });
}
public override double distance(Instance first, Instance second, double cutOffValue, PerformanceStats stats)
{
    double sqDistance = 0;
    int numAttributes = m_Data.numAttributes();

    validate();

    double diff;
    for (int i = 0; i < numAttributes; i++)
    {
        diff = 0;
        if (m_ActiveIndices[i])
        {
            diff = difference(i, first.stringValue(i), second.stringValue(i));
        }
        sqDistance = updateDistance(sqDistance, diff);
        if (sqDistance > (cutOffValue * cutOffValue))
        {
            return Double.PositiveInfinity;
        }
    }

    double distance = Math.Sqrt(sqDistance);
    return distance;
}
internal EventStreamSubscriber(EventStreamSubscriberSettings settings)
{
    _connection = settings.Connection;
    _eventHandlerResolver = settings.EventHandlerResolver;
    _streamPositionRepository = settings.StreamPositionRepository;
    _subscriptionTimerManager = settings.SubscriptionTimerManager;
    _eventTypeResolver = settings.EventTypeResolver;
    _defaultPollingInterval = settings.DefaultPollingInterval;
    _sliceSize = settings.SliceSize;
    _longPollingTimeout = settings.LongPollingTimeout;
    _performanceMonitors = settings.PerformanceMonitors;
    _log = settings.Log;
    _eventNotFoundRetryCount = settings.EventNotFoundRetryCount;
    _eventNotFoundRetryDelay = settings.EventNotFoundRetryDelay;
    StreamSubscriberMonitor = settings.SubscriberIntervalMonitor;
    AllEventsStats = new PerformanceStats(settings.MessageProcessingStatsWindowPeriod, settings.MessageProcessingStatsWindowCount);
    ProcessedEventsStats = new PerformanceStats(settings.MessageProcessingStatsWindowPeriod, settings.MessageProcessingStatsWindowCount);
}
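// Usage sketch (not from the original sources): shows how the windowed stats collector
// constructed above is typically exercised, based only on the PerformanceStats API visible
// in the surrounding snippets — a (window period, bucket count) constructor,
// MessageProcessed(string), Records, and RecordsAfter(DateTime). The class name, window
// values, and message strings below are hypothetical; assumes `using System;` and
// `using System.Linq;`.
public static class PerformanceStatsUsageSketch
{
    public static void Run()
    {
        // One-minute buckets, keeping the last 10 (assumed values, standing in for the
        // MessageProcessingStatsWindowPeriod/Count settings used by the subscriber above).
        var stats = new PerformanceStats(TimeSpan.FromMinutes(1), 10);

        // Record processed messages; the tests above pass an arbitrary descriptive string.
        stats.MessageProcessed("OrderPlaced");
        stats.MessageProcessed("OrderShipped");

        // Read back the current bucket and any buckets starting after a given time.
        var current = stats.Records.First();
        var recent = stats.RecordsAfter(DateTime.Now.AddMinutes(-5));

        Console.WriteLine("Current bucket starts at {0}; recent buckets: {1}", current.StartTime, recent.Count());
    }
}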
public async Task TidyRecords_WhenAddingRecords_ShouldFillInGapsInBucketsIfNecessary()
{
    var sut = new PerformanceStats(TimeSpan.FromMilliseconds(30), 3);

    await Task.Delay(150);
    var records = (Queue<PerformanceRecord>)sut.Records;
    sut.TidyRecords();

    var now = DateTime.Now;
    var last = records.Skip(2).First();
    var middle = records.Skip(1).First();
    var first = records.Skip(0).First();

    last.EndTime.Should().BeWithin(TimeSpan.FromMilliseconds(50)).After(now);
    last.EndTime.Should().BeWithin(TimeSpan.FromMilliseconds(50)).Before(now);

    var oneMS = TimeSpan.FromMilliseconds(1);
    last.StartTime.Should().BeWithin(oneMS).After(middle.EndTime);
    middle.StartTime.Should().BeWithin(oneMS).After(first.EndTime);
}
public override PerformanceStats getStats()
{
    combinedStats.cumulativeReward = 0;
    combinedStats.modelAccesses = 0;
    combinedStats.modelUpdates = 0;

    foreach (MultiResValue<stateType, actionType> m in models)
    {
        PerformanceStats thisStats = m.getStats();
        combinedStats.cumulativeReward += thisStats.cumulativeReward;
        combinedStats.modelAccesses += thisStats.modelAccesses;
        combinedStats.modelUpdates += thisStats.modelUpdates;
    }

    if (!models.Contains(currentModel))
    {
        PerformanceStats thisStats = currentModel.getStats();
        combinedStats.cumulativeReward += thisStats.cumulativeReward;
        combinedStats.modelAccesses += thisStats.modelAccesses;
        combinedStats.modelUpdates += thisStats.modelUpdates;
    }

    return combinedStats;
}
public override void Remove(string[] keys, WorkContext context)
{
    DeletionBatchInfo deletionBatchInfo = null;
    try
    {
        deletionBatchInfo = context.ReportDeletionBatchStarted(PublicName, keys.Length);

        Write((writer, analyzer, stats) =>
        {
            var indexUpdateTriggersDuration = new Stopwatch();

            stats.Operation = IndexingWorkStats.Status.Ignore;

            if (logIndexing.IsDebugEnabled)
            {
                logIndexing.Debug(() => string.Format("Deleting ({0}) from {1}", string.Join(", ", keys), PublicName));
            }

            List<AbstractIndexUpdateTriggerBatcher> batchers;
            using (StopwatchScope.For(indexUpdateTriggersDuration))
            {
                batchers = context.IndexUpdateTriggers.Select(x => x.CreateBatcher(indexId))
                    .Where(x => x != null)
                    .ToList();

                keys.Apply(key => InvokeOnIndexEntryDeletedOnAllBatchers(batchers, new Term(Constants.DocumentIdFieldName, key.ToLowerInvariant())));
            }

            var deleteDocumentsDuration = new Stopwatch();
            using (StopwatchScope.For(deleteDocumentsDuration))
            {
                writer.DeleteDocuments(keys.Select(k => new Term(Constants.DocumentIdFieldName, k.ToLowerInvariant())).ToArray());
            }

            deletionBatchInfo.PerformanceStats.Add(PerformanceStats.From(IndexingOperation.Delete_Documents, deleteDocumentsDuration.ElapsedMilliseconds));

            using (StopwatchScope.For(indexUpdateTriggersDuration))
            {
                batchers.ApplyAndIgnoreAllErrors(
                    e =>
                    {
                        logIndexing.WarnException("Failed to dispose on index update trigger in " + PublicName, e);
                        context.AddError(indexId, PublicName, null, e, "Dispose Trigger");
                    },
                    batcher => batcher.Dispose());
            }

            deletionBatchInfo.PerformanceStats.Add(PerformanceStats.From(IndexingOperation.Delete_IndexUpdateTriggers, indexUpdateTriggersDuration.ElapsedMilliseconds));

            return new IndexedItemsInfo(GetLastEtagFromStats())
            {
                ChangedDocs = keys.Length,
                DeletedKeys = keys
            };
        }, deletionBatchInfo.PerformanceStats);
    }
    finally
    {
        if (deletionBatchInfo != null)
        {
            context.ReportDeletionBatchCompleted(deletionBatchInfo);
        }
    }
}
public bool Init(Int32 NewAgentProcessID) { Log(EVerbosityLevel.Verbose, ELogColour.Green, " ...... checking certificate"); string AgentFileName = Assembly.GetExecutingAssembly().Location; try { Certificate = X509Certificate.CreateFromSignedFile(AgentFileName); } catch (Exception) { // Any exception means that either the file isn't signed or has an invalid certificate } if (Certificate != null) { // If we have a valid certificate, do the rest of the security checks bool bSecurityCheckPassed = false; // Check the certificate for some basic information to make sure it's ours byte[] EpicCertificateSerialNumber = { 0x42, 0x58, 0xa1, 0xd9, 0x82, 0x4b, 0x70, 0xe5, 0x07, 0x19, 0x96, 0xd8, 0xda, 0xcd, 0x16, 0x1c }; byte[] OtherCertificateSerialNumber = Certificate.GetSerialNumber(); if (EpicCertificateSerialNumber.Length == OtherCertificateSerialNumber.Length) { bool bSerialNumbersAreEqual = true; for (Int32 Index = 0; Index < EpicCertificateSerialNumber.Length; Index++) { bSerialNumbersAreEqual &= (EpicCertificateSerialNumber[Index] == OtherCertificateSerialNumber[Index]); } // If the certificate checks out, move onto validating our known libraries if (bSerialNumbersAreEqual) { bool bLibrariesAreOkay = true; try { X509Certificate NextCertificate; string BasePath = Path.GetDirectoryName(AgentFileName); string[] LibrariesToCheck = { Path.Combine(BasePath, "AgentInterface.dll"), Path.Combine(BasePath, "UnrealControls.dll"), Path.Combine(BasePath, "SwarmCoordinatorInterface.dll"), }; foreach (string NextLibrary in LibrariesToCheck) { NextCertificate = X509Certificate.CreateFromSignedFile(NextLibrary); bLibrariesAreOkay &= NextCertificate.Equals(Certificate); } } catch (Exception) { // If any of them fail, they all fail bLibrariesAreOkay = false; } // If we get here and the libraries are okay, we're done validating if (bLibrariesAreOkay) { bSecurityCheckPassed = true; } } } if (bSecurityCheckPassed == false) { // Not what we expect, ditch it Certificate = null; } } if (Certificate == null) { Log(EVerbosityLevel.Informative, ELogColour.Orange, " ......... certificate check has failed"); } Log(EVerbosityLevel.Informative, ELogColour.Green, " ...... initializing cache"); if (InitCache() == false) { // Failed to initialize the cache properly, fail return(false); } // Initialize the coordinator connection Log(EVerbosityLevel.Informative, ELogColour.Green, " ...... initializing connection to SwarmCoordinator"); InitCoordinator(); // Initialize the local performance monitoring object Log(EVerbosityLevel.Informative, ELogColour.Green, " ...... initializing local performance monitoring subsystem"); try { LocalPerformanceStats = new PerformanceStats(); } catch (Exception Ex) { Log(EVerbosityLevel.Informative, ELogColour.Orange, " ...... local performance monitoring subsystem initialization failed"); Log(EVerbosityLevel.Verbose, ELogColour.Orange, Ex.ToString()); } // Startup the message processing thread ThreadStart ThreadStartDelegateMessages = new ThreadStart(ProcessMessagesThreadProc); ProcessMessageThread = new Thread(ThreadStartDelegateMessages); ProcessMessageThread.Name = "ProcessMessageThread"; ProcessMessageThread.Start(); // Set the next times we should run some of the idle task processors NextCleanUpCacheTime = DateTime.UtcNow + TimeSpan.FromSeconds(30); // Set the owning process ID AgentProcessID = NewAgentProcessID; // Finally, signal that we're fully initialized InitializedTime = DateTime.UtcNow; Initialized.Set(); return(true); }
public IndexingPerformanceStats ExecuteReduction() { var count = 0; var sourceCount = 0; var addDocumentDuration = new Stopwatch(); var convertToLuceneDocumentDuration = new Stopwatch(); var linqExecutionDuration = new Stopwatch(); var deleteExistingDocumentsDuration = new Stopwatch(); var writeToIndexStats = new List <PerformanceStats>(); IndexingPerformanceStats performance = null; parent.Write((indexWriter, analyzer, stats) => { stats.Operation = IndexingWorkStats.Status.Reduce; try { if (Level == 2) { RemoveExistingReduceKeysFromIndex(indexWriter, deleteExistingDocumentsDuration); } foreach (var mappedResults in MappedResultsByBucket) { var input = mappedResults.Select(x => { sourceCount++; return(x); }); IndexingFunc reduceDefinition = ViewGenerator.ReduceDefinition; foreach (var doc in parent.RobustEnumerationReduce(input.GetEnumerator(), reduceDefinition, stats, linqExecutionDuration)) { count++; switch (Level) { case 0: case 1: string reduceKeyAsString = ExtractReduceKey(ViewGenerator, doc); Actions.MapReduce.PutReducedResult(indexId, reduceKeyAsString, Level + 1, mappedResults.Key, mappedResults.Key / 1024, ToJsonDocument(doc)); Actions.General.MaybePulseTransaction(); break; case 2: WriteDocumentToIndex(doc, indexWriter, analyzer, convertToLuceneDocumentDuration, addDocumentDuration); break; default: throw new InvalidOperationException("Unknown level: " + Level); } stats.ReduceSuccesses++; } } } catch (Exception e) { if (Level == 2) { batchers.ApplyAndIgnoreAllErrors( ex => { logIndexing.WarnException("Failed to notify index update trigger batcher about an error", ex); Context.AddError(indexId, parent.indexDefinition.Name, null, ex, "AnErrorOccured Trigger"); }, x => x.AnErrorOccured(e)); } throw; } finally { if (Level == 2) { batchers.ApplyAndIgnoreAllErrors( e => { logIndexing.WarnException("Failed to dispose on index update trigger", e); Context.AddError(indexId, parent.indexDefinition.Name, null, e, "Dispose Trigger"); }, x => x.Dispose()); } // TODO: Check if we need to report "Bucket Counts" or "Total Input Elements"? performance = parent.RecordCurrentBatch("Current Reduce #" + Level, "Reduce Level " + Level, sourceCount); } return(new IndexedItemsInfo(null) { ChangedDocs = count + ReduceKeys.Count }); }, writeToIndexStats); var performanceStats = new List <BasePerformanceStats>(); performanceStats.Add(PerformanceStats.From(IndexingOperation.Linq_ReduceLinqExecution, linqExecutionDuration.ElapsedMilliseconds)); performanceStats.Add(PerformanceStats.From(IndexingOperation.Lucene_DeleteExistingDocument, deleteExistingDocumentsDuration.ElapsedMilliseconds)); performanceStats.Add(PerformanceStats.From(IndexingOperation.Lucene_ConvertToLuceneDocument, convertToLuceneDocumentDuration.ElapsedMilliseconds)); performanceStats.Add(PerformanceStats.From(IndexingOperation.Lucene_AddDocument, addDocumentDuration.ElapsedMilliseconds)); performanceStats.AddRange(writeToIndexStats); parent.BatchCompleted("Current Reduce #" + Level, "Reduce Level " + Level, sourceCount, count, performanceStats); logIndexing.Debug(() => string.Format("Reduce resulted in {0} entries for {1} for reduce keys: {2}", count, indexId, string.Join(", ", ReduceKeys))); return(performance); }
public override IndexingPerformanceStats IndexDocuments(AbstractViewGenerator viewGenerator, IndexingBatch batch, IStorageActionsAccessor actions, DateTime minimumTimestamp, CancellationToken token) { token.ThrowIfCancellationRequested(); var count = 0; var sourceCount = 0; var deleted = new Dictionary <ReduceKeyAndBucket, int>(); var performance = RecordCurrentBatch("Current Map", "Map", batch.Docs.Count); var performanceStats = new List <BasePerformanceStats>(); var usedStorageAccessors = new ConcurrentSet <IStorageActionsAccessor>(); if (usedStorageAccessors.TryAdd(actions)) { var storageCommitDuration = new Stopwatch(); actions.BeforeStorageCommit += storageCommitDuration.Start; actions.AfterStorageCommit += () => { storageCommitDuration.Stop(); performanceStats.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds)); }; } var deleteMappedResultsDuration = new Stopwatch(); var documentsWrapped = batch.Docs.Select(doc => { token.ThrowIfCancellationRequested(); sourceCount++; var documentId = doc.__document_id; using (StopwatchScope.For(deleteMappedResultsDuration)) { actions.MapReduce.DeleteMappedResultsForDocumentId((string)documentId, indexId, deleted); } return(doc); }) .Where(x => x is FilteredDocument == false) .ToList(); performanceStats.Add(new PerformanceStats { Name = IndexingOperation.Map_DeleteMappedResults, DurationMs = deleteMappedResultsDuration.ElapsedMilliseconds, }); var allReferencedDocs = new ConcurrentQueue <IDictionary <string, HashSet <string> > >(); var allReferenceEtags = new ConcurrentQueue <IDictionary <string, Etag> >(); var allState = new ConcurrentQueue <Tuple <HashSet <ReduceKeyAndBucket>, IndexingWorkStats, Dictionary <string, int> > >(); var parallelOperations = new ConcurrentQueue <ParallelBatchStats>(); var parallelProcessingStart = SystemTime.UtcNow; BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, documentsWrapped, partition => { token.ThrowIfCancellationRequested(); var parallelStats = new ParallelBatchStats { StartDelay = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds }; var localStats = new IndexingWorkStats(); var localChanges = new HashSet <ReduceKeyAndBucket>(); var statsPerKey = new Dictionary <string, int>(); var linqExecutionDuration = new Stopwatch(); var reduceInMapLinqExecutionDuration = new Stopwatch(); var putMappedResultsDuration = new Stopwatch(); var convertToRavenJObjectDuration = new Stopwatch(); allState.Enqueue(Tuple.Create(localChanges, localStats, statsPerKey)); using (CurrentIndexingScope.Current = new CurrentIndexingScope(context.Database, PublicName)) { // we are writing to the transactional store from multiple threads here, and in a streaming fashion // should result in less memory and better perf context.TransactionalStorage.Batch(accessor => { if (usedStorageAccessors.TryAdd(accessor)) { var storageCommitDuration = new Stopwatch(); accessor.BeforeStorageCommit += storageCommitDuration.Start; accessor.AfterStorageCommit += () => { storageCommitDuration.Stop(); parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds)); }; } var mapResults = RobustEnumerationIndex(partition, viewGenerator.MapDefinitions, localStats, linqExecutionDuration); var currentDocumentResults = new List <object>(); string currentKey = null; bool skipDocument = false; foreach (var currentDoc in mapResults) { token.ThrowIfCancellationRequested(); var documentId = GetDocumentId(currentDoc); if (documentId 
!= currentKey) { count += ProcessBatch(viewGenerator, currentDocumentResults, currentKey, localChanges, accessor, statsPerKey, reduceInMapLinqExecutionDuration, putMappedResultsDuration, convertToRavenJObjectDuration); currentDocumentResults.Clear(); currentKey = documentId; } else if (skipDocument) { continue; } RavenJObject currentDocJObject; using (StopwatchScope.For(convertToRavenJObjectDuration)) { currentDocJObject = RavenJObject.FromObject(currentDoc, jsonSerializer); } currentDocumentResults.Add(new DynamicJsonObject(currentDocJObject)); if (EnsureValidNumberOfOutputsForDocument(documentId, currentDocumentResults.Count) == false) { skipDocument = true; currentDocumentResults.Clear(); continue; } Interlocked.Increment(ref localStats.IndexingSuccesses); } count += ProcessBatch(viewGenerator, currentDocumentResults, currentKey, localChanges, accessor, statsPerKey, reduceInMapLinqExecutionDuration, putMappedResultsDuration, convertToRavenJObjectDuration); parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.LoadDocument, CurrentIndexingScope.Current.LoadDocumentDuration.ElapsedMilliseconds)); parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Linq_MapExecution, linqExecutionDuration.ElapsedMilliseconds)); parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Linq_ReduceLinqExecution, reduceInMapLinqExecutionDuration.ElapsedMilliseconds)); parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Map_PutMappedResults, putMappedResultsDuration.ElapsedMilliseconds)); parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Map_ConvertToRavenJObject, convertToRavenJObjectDuration.ElapsedMilliseconds)); parallelOperations.Enqueue(parallelStats); }); allReferenceEtags.Enqueue(CurrentIndexingScope.Current.ReferencesEtags); allReferencedDocs.Enqueue(CurrentIndexingScope.Current.ReferencedDocuments); } }); performanceStats.Add(new ParallelPerformanceStats { NumberOfThreads = parallelOperations.Count, DurationMs = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds, BatchedOperations = parallelOperations.ToList() }); var updateDocumentReferencesDuration = new Stopwatch(); using (StopwatchScope.For(updateDocumentReferencesDuration)) { UpdateDocumentReferences(actions, allReferencedDocs, allReferenceEtags); } performanceStats.Add(PerformanceStats.From(IndexingOperation.UpdateDocumentReferences, updateDocumentReferencesDuration.ElapsedMilliseconds)); var changed = allState.SelectMany(x => x.Item1).Concat(deleted.Keys) .Distinct() .ToList(); var stats = new IndexingWorkStats(allState.Select(x => x.Item2)); var reduceKeyStats = allState.SelectMany(x => x.Item3) .GroupBy(x => x.Key) .Select(g => new { g.Key, Count = g.Sum(x => x.Value) }) .ToList(); BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, reduceKeyStats, enumerator => context.TransactionalStorage.Batch(accessor => { while (enumerator.MoveNext()) { var reduceKeyStat = enumerator.Current; accessor.MapReduce.IncrementReduceKeyCounter(indexId, reduceKeyStat.Key, reduceKeyStat.Count); } })); actions.General.MaybePulseTransaction(); var parallelReductionOperations = new ConcurrentQueue <ParallelBatchStats>(); var parallelReductionStart = SystemTime.UtcNow; BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, changed, enumerator => context.TransactionalStorage.Batch(accessor => { var parallelStats = new ParallelBatchStats { StartDelay = (long)(SystemTime.UtcNow - parallelReductionStart).TotalMilliseconds }; var scheduleReductionsDuration = new 
Stopwatch(); using (StopwatchScope.For(scheduleReductionsDuration)) { while (enumerator.MoveNext()) { accessor.MapReduce.ScheduleReductions(indexId, 0, enumerator.Current); accessor.General.MaybePulseTransaction(); } } parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Map_ScheduleReductions, scheduleReductionsDuration.ElapsedMilliseconds)); parallelReductionOperations.Enqueue(parallelStats); })); performanceStats.Add(new ParallelPerformanceStats { NumberOfThreads = parallelReductionOperations.Count, DurationMs = (long)(SystemTime.UtcNow - parallelReductionStart).TotalMilliseconds, BatchedOperations = parallelReductionOperations.ToList() }); UpdateIndexingStats(context, stats); performance.OnCompleted = () => BatchCompleted("Current Map", "Map", sourceCount, count, performanceStats); logIndexing.Debug("Mapped {0} documents for {1}", count, indexId); return(performance); }
private ReducingPerformanceStats SingleStepReduce(IndexToWorkOn index, List <string> keysToReduce, AbstractViewGenerator viewGenerator, ConcurrentSet <object> itemsToDelete, CancellationToken token) { var needToMoveToSingleStepQueue = new ConcurrentQueue <HashSet <string> >(); if (Log.IsDebugEnabled) { Log.Debug(() => string.Format("Executing single step reducing for {0} keys [{1}]", keysToReduce.Count, string.Join(", ", keysToReduce))); } var batchTimeWatcher = Stopwatch.StartNew(); var reducingBatchThrottlerId = Guid.NewGuid(); var reducePerformanceStats = new ReducingPerformanceStats(ReduceType.SingleStep); var reduceLevelStats = new ReduceLevelPeformanceStats { Started = SystemTime.UtcNow, Level = 2 }; try { var parallelOperations = new ConcurrentQueue <ParallelBatchStats>(); var parallelProcessingStart = SystemTime.UtcNow; BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, keysToReduce, enumerator => { var parallelStats = new ParallelBatchStats { StartDelay = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds }; var localNeedToMoveToSingleStep = new HashSet <string>(); needToMoveToSingleStepQueue.Enqueue(localNeedToMoveToSingleStep); var localKeys = new HashSet <string>(); while (enumerator.MoveNext()) { token.ThrowIfCancellationRequested(); localKeys.Add(enumerator.Current); } transactionalStorage.Batch(actions => { var getItemsToReduceParams = new GetItemsToReduceParams(index: index.IndexId, reduceKeys: new HashSet <string>(localKeys), level: 0, loadData: false, itemsToDelete: itemsToDelete) { Take = int.MaxValue // just get all, we do the rate limit when we load the number of keys to reduce, anyway }; var getItemsToReduceDuration = Stopwatch.StartNew(); int scheduledItemsSum = 0; int scheduledItemsCount = 0; List <int> scheduledItemsMappedBuckets = new List <int>(); using (StopwatchScope.For(getItemsToReduceDuration)) { foreach (var item in actions.MapReduce.GetItemsToReduce(getItemsToReduceParams, token)) { scheduledItemsMappedBuckets.Add(item.Bucket); scheduledItemsSum += item.Size; scheduledItemsCount++; } } parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_GetItemsToReduce, getItemsToReduceDuration.ElapsedMilliseconds)); autoTuner.CurrentlyUsedBatchSizesInBytes.GetOrAdd(reducingBatchThrottlerId, scheduledItemsSum); if (scheduledItemsCount == 0) { // Here we have an interesting issue. We have scheduled reductions, because GetReduceTypesPerKeys() returned them // and at the same time, we don't have any at level 0. That probably means that we have them at level 1 or 2. // They shouldn't be here, and indeed, we remove them just a little down from here in this function. // That said, they might have smuggled in between versions, or something happened to cause them to be here. // In order to avoid that, we forcibly delete those extra items from the scheduled reductions, and move on Log.Warn("Found single reduce items ({0}) that didn't have any items to reduce. Deleting level 1 & level 2 items for those keys. 
(If you can reproduce this, please contact [email protected])", string.Join(", ", keysToReduce)); var deletingScheduledReductionsDuration = Stopwatch.StartNew(); using (StopwatchScope.For(deletingScheduledReductionsDuration)) { foreach (var reduceKey in keysToReduce) { token.ThrowIfCancellationRequested(); actions.MapReduce.DeleteScheduledReduction(index.IndexId, 1, reduceKey); actions.MapReduce.DeleteScheduledReduction(index.IndexId, 2, reduceKey); } } parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_DeleteScheduledReductions, deletingScheduledReductionsDuration.ElapsedMilliseconds)); } var removeReduceResultsDuration = new Stopwatch(); foreach (var reduceKey in localKeys) { token.ThrowIfCancellationRequested(); var lastPerformedReduceType = actions.MapReduce.GetLastPerformedReduceType(index.IndexId, reduceKey); if (lastPerformedReduceType != ReduceType.SingleStep) { localNeedToMoveToSingleStep.Add(reduceKey); } if (lastPerformedReduceType != ReduceType.MultiStep) { continue; } if (Log.IsDebugEnabled) { Log.Debug("Key {0} was moved from multi step to single step reduce, removing existing reduce results records", reduceKey); } using (StopwatchScope.For(removeReduceResultsDuration)) { // now we are in single step but previously multi step reduce was performed for the given key var mappedBuckets = actions.MapReduce.GetMappedBuckets(index.IndexId, reduceKey, token); // add scheduled items too to be sure we will delete reduce results of already deleted documents foreach (var mappedBucket in mappedBuckets.Union(scheduledItemsMappedBuckets)) { actions.MapReduce.RemoveReduceResults(index.IndexId, 1, reduceKey, mappedBucket); actions.MapReduce.RemoveReduceResults(index.IndexId, 2, reduceKey, mappedBucket / 1024); } } } parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_RemoveReduceResults, removeReduceResultsDuration.ElapsedMilliseconds)); parallelOperations.Enqueue(parallelStats); }); }); reduceLevelStats.Operations.Add(new ParallelPerformanceStats { NumberOfThreads = parallelOperations.Count, DurationMs = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds, BatchedOperations = parallelOperations.ToList() }); var getMappedResultsDuration = new Stopwatch(); var reductionPerformanceStats = new List <IndexingPerformanceStats>(); var keysLeftToReduce = new HashSet <string>(keysToReduce); while (keysLeftToReduce.Count > 0) { var keysReturned = new HashSet <string>(); // Try to diminish the allocations happening because of .Resize() var mappedResults = new List <MappedResultInfo>(keysLeftToReduce.Count); context.TransactionalStorage.Batch(actions => { var take = context.CurrentNumberOfItemsToReduceInSingleBatch; using (StopwatchScope.For(getMappedResultsDuration)) { mappedResults = actions.MapReduce.GetMappedResults(index.IndexId, keysLeftToReduce, true, take, keysReturned, token, mappedResults); } }); var count = mappedResults.Count; int size = 0; foreach (var item in mappedResults) { item.Bucket = 0; size += item.Size; } var results = mappedResults.GroupBy(x => x.Bucket, x => JsonToExpando.Convert(x.Data)).ToArray(); context.MetricsCounters.ReducedPerSecond.Mark(results.Length); token.ThrowIfCancellationRequested(); var performance = context.IndexStorage.Reduce(index.IndexId, viewGenerator, results, 2, context, null, keysReturned, count); reductionPerformanceStats.Add(performance); autoTuner.AutoThrottleBatchSize(count, size, batchTimeWatcher.Elapsed); } var needToMoveToSingleStep = new HashSet <string>(); HashSet <string> set; 
while (needToMoveToSingleStepQueue.TryDequeue(out set)) { needToMoveToSingleStep.UnionWith(set); } foreach (var reduceKey in needToMoveToSingleStep) { string localReduceKey = reduceKey; transactionalStorage.Batch(actions => actions.MapReduce.UpdatePerformedReduceType(index.IndexId, localReduceKey, ReduceType.SingleStep)); } reduceLevelStats.Completed = SystemTime.UtcNow; reduceLevelStats.Duration = reduceLevelStats.Completed - reduceLevelStats.Started; reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_GetMappedResults, getMappedResultsDuration.ElapsedMilliseconds)); reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.StorageCommit, 0)); // in single step we write directly to Lucene index foreach (var stats in reductionPerformanceStats) { reduceLevelStats.Add(stats); } reducePerformanceStats.LevelStats.Add(reduceLevelStats); } finally { long _; autoTuner.CurrentlyUsedBatchSizesInBytes.TryRemove(reducingBatchThrottlerId, out _); } return(reducePerformanceStats); }
public override IndexingPerformanceStats IndexDocuments(AbstractViewGenerator viewGenerator, IndexingBatch batch, IStorageActionsAccessor actions, DateTime minimumTimestamp, CancellationToken token) { token.ThrowIfCancellationRequested(); var count = 0; var sourceCount = 0; var writeToIndexStats = new List <PerformanceStats>(); IndexingPerformanceStats performance = null; var performanceStats = new List <BasePerformanceStats>(); var storageCommitDuration = new Stopwatch(); actions.BeforeStorageCommit += storageCommitDuration.Start; actions.AfterStorageCommit += () => { storageCommitDuration.Stop(); performanceStats.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds)); }; Write((indexWriter, analyzer, stats) => { var processedKeys = new HashSet <string>(); var batchers = context.IndexUpdateTriggers.Select(x => x.CreateBatcher(indexId)) .Where(x => x != null) .ToList(); try { performance = RecordCurrentBatch("Current", "Index", batch.Docs.Count); var deleteExistingDocumentsDuration = new Stopwatch(); Interlocked.Increment(ref sourceCount); var docIdTerm = new Term(Constants.DocumentIdFieldName); var documentsWrapped = batch.Docs.Select((doc, i) => { token.ThrowIfCancellationRequested(); if (doc.__document_id == null) { throw new ArgumentException( string.Format("Cannot index something which doesn't have a document id, but got: '{0}'", doc)); } string documentId = doc.__document_id.ToString(); if (processedKeys.Add(documentId) == false) { return(doc); } InvokeOnIndexEntryDeletedOnAllBatchers(batchers, docIdTerm.CreateTerm(documentId.ToLowerInvariant())); if (batch.SkipDeleteFromIndex[i] == false || context.ShouldRemoveFromIndex(documentId)) // maybe it is recently deleted? { using (StopwatchScope.For(deleteExistingDocumentsDuration)) { indexWriter.DeleteDocuments(docIdTerm.CreateTerm(documentId.ToLowerInvariant())); } } return(doc); }) .Where(x => x is FilteredDocument == false) .ToList(); performanceStats.Add(new PerformanceStats { Name = IndexingOperation.Lucene_DeleteExistingDocument, DurationMs = deleteExistingDocumentsDuration.ElapsedMilliseconds }); var allReferencedDocs = new ConcurrentQueue <IDictionary <string, HashSet <string> > >(); var allReferenceEtags = new ConcurrentQueue <IDictionary <string, Etag> >(); var parallelOperations = new ConcurrentQueue <ParallelBatchStats>(); var parallelProcessingStart = SystemTime.UtcNow; context.Database.MappingThreadPool.ExecuteBatch(documentsWrapped, (IEnumerator <dynamic> partition) => { token.ThrowIfCancellationRequested(); var parallelStats = new ParallelBatchStats { StartDelay = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds }; var anonymousObjectToLuceneDocumentConverter = new AnonymousObjectToLuceneDocumentConverter(context.Database, indexDefinition, viewGenerator, logIndexing); var luceneDoc = new Document(); var documentIdField = new Field(Constants.DocumentIdFieldName, "dummy", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS); using (CurrentIndexingScope.Current = new CurrentIndexingScope(context.Database, PublicName)) { string currentDocId = null; int outputPerDocId = 0; Action <Exception, object> onErrorFunc; bool skipDocument = false; var linqExecutionDuration = new Stopwatch(); var addDocumentDutation = new Stopwatch(); var convertToLuceneDocumentDuration = new Stopwatch(); foreach (var doc in RobustEnumerationIndex(partition, viewGenerator.MapDefinitions, stats, out onErrorFunc, linqExecutionDuration)) { token.ThrowIfCancellationRequested(); float boost; 
IndexingResult indexingResult; using (StopwatchScope.For(convertToLuceneDocumentDuration)) { try { indexingResult = GetIndexingResult(doc, anonymousObjectToLuceneDocumentConverter, out boost); } catch (Exception e) { onErrorFunc(e, doc); continue; } } // ReSharper disable once RedundantBoolCompare --> code clarity if (indexingResult.NewDocId == null || indexingResult.ShouldSkip != false) { continue; } if (currentDocId != indexingResult.NewDocId) { currentDocId = indexingResult.NewDocId; outputPerDocId = 0; skipDocument = false; } if (skipDocument) { continue; } outputPerDocId++; if (EnsureValidNumberOfOutputsForDocument(currentDocId, outputPerDocId) == false) { skipDocument = true; continue; } Interlocked.Increment(ref count); using (StopwatchScope.For(convertToLuceneDocumentDuration)) { luceneDoc.GetFields().Clear(); luceneDoc.Boost = boost; documentIdField.SetValue(indexingResult.NewDocId.ToLowerInvariant()); luceneDoc.Add(documentIdField); foreach (var field in indexingResult.Fields) { luceneDoc.Add(field); } } batchers.ApplyAndIgnoreAllErrors( exception => { logIndexing.WarnException( string.Format( "Error when executed OnIndexEntryCreated trigger for index '{0}', key: '{1}'", PublicName, indexingResult.NewDocId), exception); context.AddError( indexId, PublicName, indexingResult.NewDocId, exception, "OnIndexEntryCreated Trigger"); }, trigger => trigger.OnIndexEntryCreated(indexingResult.NewDocId, luceneDoc)); LogIndexedDocument(indexingResult.NewDocId, luceneDoc); using (StopwatchScope.For(addDocumentDutation)) { AddDocumentToIndex(indexWriter, luceneDoc, analyzer); } Interlocked.Increment(ref stats.IndexingSuccesses); } allReferenceEtags.Enqueue(CurrentIndexingScope.Current.ReferencesEtags); allReferencedDocs.Enqueue(CurrentIndexingScope.Current.ReferencedDocuments); parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.LoadDocument, CurrentIndexingScope.Current.LoadDocumentDuration.ElapsedMilliseconds)); parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Linq_MapExecution, linqExecutionDuration.ElapsedMilliseconds)); parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Lucene_ConvertToLuceneDocument, convertToLuceneDocumentDuration.ElapsedMilliseconds)); parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Lucene_AddDocument, addDocumentDutation.ElapsedMilliseconds)); parallelOperations.Enqueue(parallelStats); } }, description: string.Format("Mapping index {0} from Etag {1} to Etag {2}", this.PublicName, this.GetLastEtagFromStats(), batch.HighestEtagBeforeFiltering)); performanceStats.Add(new ParallelPerformanceStats { NumberOfThreads = parallelOperations.Count, DurationMs = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds, BatchedOperations = parallelOperations.ToList() }); var updateDocumentReferencesDuration = new Stopwatch(); using (StopwatchScope.For(updateDocumentReferencesDuration)) { UpdateDocumentReferences(actions, allReferencedDocs, allReferenceEtags); } performanceStats.Add(PerformanceStats.From(IndexingOperation.UpdateDocumentReferences, updateDocumentReferencesDuration.ElapsedMilliseconds)); } catch (Exception e) { batchers.ApplyAndIgnoreAllErrors( ex => { logIndexing.WarnException("Failed to notify index update trigger batcher about an error in " + PublicName, ex); context.AddError(indexId, PublicName, null, ex, "AnErrorOccured Trigger"); }, x => x.AnErrorOccured(e)); throw; } finally { batchers.ApplyAndIgnoreAllErrors( e => { logIndexing.WarnException("Failed to dispose on index 
update trigger in " + PublicName, e); context.AddError(indexId, PublicName, null, e, "Dispose Trigger"); }, x => x.Dispose()); } return(new IndexedItemsInfo(batch.HighestEtagBeforeFiltering) { ChangedDocs = sourceCount }); }, writeToIndexStats); performanceStats.AddRange(writeToIndexStats); InitializeIndexingPerformanceCompleteDelegate(performance, sourceCount, count, performanceStats); if (logIndexing.IsDebugEnabled) { logIndexing.Debug("Indexed {0} documents for {1}", count, PublicName); } return(performance); }
private ReducingPerformanceStats MultiStepReduce(IndexToWorkOn index, List <string> keysToReduce, AbstractViewGenerator viewGenerator, ConcurrentSet <object> itemsToDelete, CancellationToken token) { var needToMoveToMultiStep = new HashSet <string>(); transactionalStorage.Batch(actions => { foreach (var localReduceKey in keysToReduce) { token.ThrowIfCancellationRequested(); var lastPerformedReduceType = actions.MapReduce.GetLastPerformedReduceType(index.IndexId, localReduceKey); if (lastPerformedReduceType != ReduceType.MultiStep) { needToMoveToMultiStep.Add(localReduceKey); } if (lastPerformedReduceType != ReduceType.SingleStep) { continue; } // we exceeded the limit of items to reduce in single step // now we need to schedule reductions at level 0 for all map results with given reduce key var mappedItems = actions.MapReduce.GetMappedBuckets(index.IndexId, localReduceKey, token).ToList(); foreach (var result in mappedItems.Select(x => new ReduceKeyAndBucket(x, localReduceKey))) { actions.MapReduce.ScheduleReductions(index.IndexId, 0, result); } } }); var reducePerformance = new ReducingPerformanceStats(ReduceType.MultiStep); for (int i = 0; i < 3; i++) { var level = i; var reduceLevelStats = new ReduceLevelPeformanceStats() { Level = level, Started = SystemTime.UtcNow, }; var reduceParams = new GetItemsToReduceParams( index.IndexId, new HashSet <string>(keysToReduce), level, true, itemsToDelete); var gettingItemsToReduceDuration = new Stopwatch(); var scheduleReductionsDuration = new Stopwatch(); var removeReduceResultsDuration = new Stopwatch(); var storageCommitDuration = new Stopwatch(); bool retry = true; while (retry && reduceParams.ReduceKeys.Count > 0) { var reduceBatchAutoThrottlerId = Guid.NewGuid(); try { transactionalStorage.Batch(actions => { token.ThrowIfCancellationRequested(); actions.BeforeStorageCommit += storageCommitDuration.Start; actions.AfterStorageCommit += storageCommitDuration.Stop; var batchTimeWatcher = Stopwatch.StartNew(); reduceParams.Take = context.CurrentNumberOfItemsToReduceInSingleBatch; int size = 0; IList <MappedResultInfo> persistedResults; var reduceKeys = new HashSet <string>(StringComparer.InvariantCultureIgnoreCase); using (StopwatchScope.For(gettingItemsToReduceDuration)) { persistedResults = actions.MapReduce.GetItemsToReduce(reduceParams, token); foreach (var item in persistedResults) { reduceKeys.Add(item.ReduceKey); size += item.Size; } } if (persistedResults.Count == 0) { retry = false; return; } var count = persistedResults.Count; autoTuner.CurrentlyUsedBatchSizesInBytes.GetOrAdd(reduceBatchAutoThrottlerId, size); if (Log.IsDebugEnabled) { if (persistedResults.Count > 0) { Log.Debug(() => string.Format("Found {0} results for keys [{1}] for index {2} at level {3} in {4}", persistedResults.Count, string.Join(", ", persistedResults.Select(x => x.ReduceKey).Distinct()), index.Index.PublicName, level, batchTimeWatcher.Elapsed)); } else { Log.Debug("No reduce keys found for {0}", index.Index.PublicName); } } token.ThrowIfCancellationRequested(); var requiredReduceNextTimeSet = new HashSet <ReduceKeyAndBucket>(persistedResults.Select(x => new ReduceKeyAndBucket(x.Bucket, x.ReduceKey)), ReduceKeyAndBucketEqualityComparer.Instance); using (StopwatchScope.For(removeReduceResultsDuration)) { foreach (var mappedResultInfo in requiredReduceNextTimeSet) { token.ThrowIfCancellationRequested(); actions.MapReduce.RemoveReduceResults(index.IndexId, level + 1, mappedResultInfo.ReduceKey, mappedResultInfo.Bucket); } } if (level != 2) { var 
reduceKeysAndBucketsSet = new HashSet <ReduceKeyAndBucket>(requiredReduceNextTimeSet.Select(x => new ReduceKeyAndBucket(x.Bucket / 1024, x.ReduceKey)), ReduceKeyAndBucketEqualityComparer.Instance); using (StopwatchScope.For(scheduleReductionsDuration)) { foreach (var reduceKeysAndBucket in reduceKeysAndBucketsSet) { token.ThrowIfCancellationRequested(); actions.MapReduce.ScheduleReductions(index.IndexId, level + 1, reduceKeysAndBucket); } } } token.ThrowIfCancellationRequested(); var reduceTimeWatcher = Stopwatch.StartNew(); var results = persistedResults.Where(x => x.Data != null) .GroupBy(x => x.Bucket, x => JsonToExpando.Convert(x.Data)) .ToList(); var performance = context.IndexStorage.Reduce(index.IndexId, viewGenerator, results, level, context, actions, reduceKeys, persistedResults.Count); context.MetricsCounters.ReducedPerSecond.Mark(results.Count()); reduceLevelStats.Add(performance); var batchDuration = batchTimeWatcher.Elapsed; if (Log.IsDebugEnabled) { Log.Debug("Indexed {0} reduce keys in {1} with {2} results for index {3} in {4} on level {5}", reduceKeys.Count, batchDuration, performance.ItemsCount, index.Index.PublicName, reduceTimeWatcher.Elapsed, level); } autoTuner.AutoThrottleBatchSize(count, size, batchDuration); }); } finally { long _; autoTuner.CurrentlyUsedBatchSizesInBytes.TryRemove(reduceBatchAutoThrottlerId, out _); } } reduceLevelStats.Completed = SystemTime.UtcNow; reduceLevelStats.Duration = reduceLevelStats.Completed - reduceLevelStats.Started; reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_GetItemsToReduce, gettingItemsToReduceDuration.ElapsedMilliseconds)); reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_ScheduleReductions, scheduleReductionsDuration.ElapsedMilliseconds)); reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_RemoveReduceResults, removeReduceResultsDuration.ElapsedMilliseconds)); reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds)); reducePerformance.LevelStats.Add(reduceLevelStats); } foreach (var reduceKey in needToMoveToMultiStep) { token.ThrowIfCancellationRequested(); string localReduceKey = reduceKey; transactionalStorage.Batch(actions => actions.MapReduce.UpdatePerformedReduceType(index.IndexId, localReduceKey, ReduceType.MultiStep)); } return(reducePerformance); }
protected ReducingPerformanceStats[] HandleReduceForIndex(IndexToWorkOn indexToWorkOn, CancellationToken token) { var viewGenerator = context.IndexDefinitionStorage.GetViewGenerator(indexToWorkOn.IndexId); if (viewGenerator == null) { return(null); } bool operationCanceled = false; var itemsToDelete = new ConcurrentSet <object>(); var singleStepReduceKeys = new List <string>(); var multiStepsReduceKeys = new List <string>(); transactionalStorage.Batch(actions => { var mappedResultsInfo = actions.MapReduce.GetReduceTypesPerKeys(indexToWorkOn.IndexId, context.CurrentNumberOfItemsToReduceInSingleBatch, context.NumberOfItemsToExecuteReduceInSingleStep, token); foreach (var key in mappedResultsInfo) { token.ThrowIfCancellationRequested(); switch (key.OperationTypeToPerform) { case ReduceType.SingleStep: singleStepReduceKeys.Add(key.ReduceKey); break; case ReduceType.MultiStep: multiStepsReduceKeys.Add(key.ReduceKey); break; } } }); currentlyProcessedIndexes.TryAdd(indexToWorkOn.IndexId, indexToWorkOn.Index); var performanceStats = new List <ReducingPerformanceStats>(); try { if (singleStepReduceKeys.Count > 0) { if (Log.IsDebugEnabled) { Log.Debug("SingleStep reduce for keys: {0}", string.Join(",", singleStepReduceKeys)); } var singleStepStats = SingleStepReduce(indexToWorkOn, singleStepReduceKeys, viewGenerator, itemsToDelete, token); performanceStats.Add(singleStepStats); } if (multiStepsReduceKeys.Count > 0) { if (Log.IsDebugEnabled) { Log.Debug("MultiStep reduce for keys: {0}", string.Join(",", multiStepsReduceKeys)); } var multiStepStats = MultiStepReduce(indexToWorkOn, multiStepsReduceKeys, viewGenerator, itemsToDelete, token); performanceStats.Add(multiStepStats); } } catch (OperationCanceledException) { operationCanceled = true; } catch (AggregateException e) { var anyOperationsCanceled = e .InnerExceptions .OfType <OperationCanceledException>() .Any(); if (anyOperationsCanceled == false) { throw; } operationCanceled = true; } finally { var postReducingOperations = new ReduceLevelPeformanceStats { Level = -1, Started = SystemTime.UtcNow }; if (operationCanceled == false) { var deletingScheduledReductionsDuration = new Stopwatch(); var storageCommitDuration = new Stopwatch(); // whatever we succeeded in indexing or not, we have to update this // because otherwise we keep trying to re-index failed mapped results transactionalStorage.Batch(actions => { actions.BeforeStorageCommit += storageCommitDuration.Start; actions.AfterStorageCommit += storageCommitDuration.Stop; ScheduledReductionInfo latest; using (StopwatchScope.For(deletingScheduledReductionsDuration)) { latest = actions.MapReduce.DeleteScheduledReduction(itemsToDelete); } if (latest == null) { return; } actions.Indexing.UpdateLastReduced(indexToWorkOn.IndexId, latest.Etag, latest.Timestamp); }); postReducingOperations.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_DeleteScheduledReductions, deletingScheduledReductionsDuration.ElapsedMilliseconds)); postReducingOperations.Operations.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds)); } postReducingOperations.Completed = SystemTime.UtcNow; postReducingOperations.Duration = postReducingOperations.Completed - postReducingOperations.Started; performanceStats.Add(new ReducingPerformanceStats(ReduceType.None) { LevelStats = new List <ReduceLevelPeformanceStats> { postReducingOperations } }); Index _; currentlyProcessedIndexes.TryRemove(indexToWorkOn.IndexId, out _); } return(performanceStats.ToArray()); }
public IActionResult Migrate(int id) { var result = string.Empty; var stock = new ImportStock(); var feedback = new ImportFeedback(); var strategy = new ImportStrategy(); var transaction = new ImportTransaction(); switch (id) { case 0: //Erase Database { PerformanceStats.Reset(); _modelRepositoryDeletionCoordinator.DeleteAll(); result = "Deleted data from current application database"; break; } case 1: //Import stocks { stock.Start(); result = "Imported stocks"; new MigrationItemPersister <StockDto>("stock").Set(stock.Items); new MigrationItemPersister <StockDto>("dividendstock").Set(stock.DividendItems); break; } case 2: //Import quotations { //var quotations = new ImportQuotations(); //quotations.StockItems = new MigrationItemPersister<StockDto>("stock").Get(); //quotations.Start(); result = "Imported quotations"; break; } case 3: //Download quotations { //var downloadQuotations = new DownloadQuotations(); //downloadQuotations.Start(); result = "Downloaded quotations"; break; } case 4: //Import feedbacks { feedback.Start(); result = "Imported feedbacks"; new MigrationItemPersister <FeedbackDto>("feedback").Set(feedback.Items); break; } case 5: //Import strategies { strategy.Start(); result = "Imported strategies"; new MigrationItemPersister <StrategyDto>("strategy").Set(strategy.Items); break; } case 6: //Import calculations { var calculation = new ImportCalculations(); calculation.Start(); result = "Imported calculations"; break; } case 7: //Import transactions { transaction.FeedbackItems = new MigrationItemPersister <FeedbackDto>("feedback").Get(); transaction.StockItems = new MigrationItemPersister <StockDto>("stock").Get(); transaction.StrategyItems = new MigrationItemPersister <StrategyDto>("strategy").Get(); transaction.DividendStockItems = new MigrationItemPersister <StockDto>("dividendstock").Get(); transaction.Start(); result = "Imported transactions"; new MigrationItemPersister <ITransactionDto>("transaction").Set(transaction.Items); break; } case 8: //Testing queries { var testing = new TestQueries(); testing.Start(); result = "Tested results with queries"; break; } case 9: //Testing performance { var stats = new TestPerformance(new MigrationItemPersister <ITransactionDto>("transaction").Get()); stats.Start(); result = "Finished testing statistics"; break; } case 10: //Testing open positions { var openPositions = new TestOpenPositions(new MigrationItemPersister <StockDto>("stock").Get(), new MigrationItemPersister <ITransactionDto>("transaction").Get()); openPositions.Start(); result = "Finished testing open positions"; break; } case 11: //Statistics { PerformanceStats.WriteToConsole(); result = "Flushed performance statistis"; break; } default: { break; } } return(Json(result)); }
public void set_for_GRAVE(List <PerformanceStats> aStats) { //timing vars float gIntroText = 4.5f; float gPreScoreCount = 0.03f; float gScoreCount = 0.2f; float gPostScoreCount = 0.07f; float gRestart = 29; //disable the depth warning mManager.mZigManager.ForceShow = 2; //add the gravestone to the scene add_character(CharacterIndex.sGrave, true, false); //remove the grave if (aStats.Last().Character.Age == 999) { aStats.RemoveAt(aStats.Count - 1); } //add in fetus in case we skipped it in debug mode if (aStats.First().Character.Age != 0) { aStats.Insert(0, new PerformanceStats(new CharacterIndex(0, 0))); } //fake it for testing... /* TODO DELETE move this into ModeNormalPlay on force kill * mCharacters.Last().destroy(); //remove the grave * mElement.Remove(mCharacters.Last()); * mCharacters.RemoveAt(mCharacters.Count -1); * Random.seed = 123; * for(int i = 0; i < 8; i++) * { * if(aStats.Last().Character.Age < (new CharacterIndex(i,0)).Age) * { * PerformanceStats stat = new PerformanceStats(new CharacterIndex(i,Random.Range(0,4))); * stat.update_score(0,Random.value); * stat.update_score(1,Random.value); * stat.Stats = mManager.mGameManager.CharacterHelper.Characters[stat.Character]; * aStats.Add(stat); * * add_character(stat.Character,false); * } * } * add_character(CharacterIndex.sGrave,false); //add the grave back in */ //this is all a hack to get the score to show up right... float scoreIncrementor = 0; FlatElementText finalScoreText = new FlatElementText(mManager.mNewRef.serifFont, 70, "0", 21); finalScoreText.HardColor = (GameConstants.UiGraveText); FlatElementText finalAgeText = new FlatElementText(mManager.mNewRef.serifFont, 50, "0", 21); float ageIncrementer = 0; finalAgeText.HardColor = (GameConstants.UiGraveText); //perfectPercent.Text = ((int)(100*aStats.Sum(e=>e.Stats.Perfect+1)/(float)(aStats.Count*3))).ToString() + "%"; //TODO why this no work?? 
finalAgeText.Text = "0"; //aStats.Last().Character.Age.ToString(); //hack to put things into bg camera foreach (Renderer f in finalScoreText.PrimaryGameObject.GetComponentsInChildren <Renderer>()) { f.gameObject.layer = 4; } foreach (Renderer f in finalAgeText.PrimaryGameObject.GetComponentsInChildren <Renderer>()) { f.gameObject.layer = 4; } //foreach (Renderer f in perfectEngraving.PrimaryGameObject.GetComponentsInChildren<Renderer>()) f.gameObject.layer = 4; Vector3 graveCenter = mCharacters[mCharacters.Count - 1].HardPosition + new Vector3(0, 50, 0); finalScoreText.HardPosition = graveCenter + new Vector3(30, -180, 0); finalAgeText.HardPosition = graveCenter + new Vector3(25, 0, 0); mElement.Add(finalScoreText); //mElement.Add(perfectEngraving); mElement.Add(finalAgeText); graveCleanup.Add(finalScoreText); graveCleanup.Add(finalAgeText); TimedEventDistributor.TimedEventChain chain = TED.empty_chain(); chain = chain.then_one_shot( delegate { set_sun(); } , 0); chain = chain.then( low_skippable_text_bubble_event(GameStrings.GetString("SM1"), gIntroText), 3); /*.then( //wait a little bit to let the fading finish * low_skippable_text_bubble_event("HERE IS YOUR LIFE STORY",gIntroText) * );*/ for (int i = 1; i < aStats.Count; i++) { PerformanceStats ps = aStats[i]; chain = chain.then_one_shot( delegate() { if (ps.Character != CharacterIndex.sOneHundred) { show_score(ps.Character, (int)ps.AdjustedScore, gPreScoreCount + gScoreCount + gPostScoreCount + 1.5f); } } , gPreScoreCount).then( delegate(float aTime) { if (aTime < gScoreCount) { float displayScore = scoreIncrementor + (aTime / gScoreCount) * ps.AdjustedScore; float displayAge = ageIncrementer + (aTime / gScoreCount) * (ps.Character.Age - ageIncrementer); finalScoreText.Text = "" + (int)displayScore; finalAgeText.Text = "" + (int)displayAge; } if (aTime > gScoreCount + gPostScoreCount) { scoreIncrementor += ps.AdjustedScore; ageIncrementer = ps.Character.Age; finalScoreText.Text = "" + (int)scoreIncrementor; finalAgeText.Text = "" + (int)ageIncrementer; return(true); } return(false); }, 0); } chain = chain.wait(2.5f); //CONNECTIONS for (int i = 1; i < aStats.Count; i++) { PerformanceStats ps = aStats[i]; float gFirstConnectionText = 5f; float gConnectionText = 5f; float gPreParticle = 2f; //TODO grave connections CharIndexContainerString connections; //TODO bool wasHard = ps.Stats.Difficulty > 1; if (wasHard) { connections = mManager.mCharacterBundleManager.get_character_stat(ps.Character).CharacterInfo.HardConnections; } else { connections = mManager.mCharacterBundleManager.get_character_stat(ps.Character).CharacterInfo.EasyConnections; } //for each connection, check if it is relevent to the currently looping character for (int j = 1; j < aStats.Count; j++) { var targetCharacter = aStats[j].Character; //charcter we are connecting to var targetConnection = connections[targetCharacter]; //message if (targetConnection != null && targetConnection != "") { int accumChange = 0; //accum change is targetCharacters effect on the current character accumChange = aStats[j].CutsceneChangeSet.accumulative_changes()[ps.Character]; if ((wasHard && accumChange > 0) || //if was hard and effect was positive (i.e. hard) (!wasHard && accumChange < 0)) //if was easy and effect was negative (i.e. 
easy) { string [] conText = targetConnection.Replace("<S>", "@").Split('@'); PopupTextObject npo = null; chain = chain.then( delegate(float aTime) { if (npo == null) { npo = add_timed_text_bubble(conText[0], gFirstConnectionText + gConnectionText, -0.6f, 1); set_popup_color_for_cutscene_particles(npo, wasHard); create_shine_over_character(targetCharacter, !wasHard, gFirstConnectionText + gConnectionText); TED.add_one_shot_event( delegate() { create_shine_over_character(ps.Character, !wasHard, gFirstConnectionText + gConnectionText - 0.3f); } , 0.3f); } if (npo.IsDestroyed || aTime > gPreParticle) { return(true); } return(false); } , 0); System.Func <FlatElementBase, float, bool> jiggleDelegate = delegate(FlatElementBase aBase, float aTime2) { aBase.mLocalRotation = Quaternion.AngleAxis(Mathf.Sin(aTime2 * Mathf.PI * 2 * 8) * 10f, Vector3.forward); if (aTime2 >= (gFirstConnectionText - gPreParticle) / 4f) { aBase.mLocalRotation = Quaternion.identity; return(true); } return(false); }; chain = chain.then_one_shot( delegate() { if (npo != null) { mCharacters[char_to_list_index(targetCharacter)].Events.add_event(jiggleDelegate, 0); ManagerManager.Manager.mMusicManager.play_sound_effect("graveShine"); } //create the shine } ).then_one_shot( delegate() { if (npo != null && !npo.IsDestroyed) //if we used softskip, npo could be destroyed at this point { npo.Text = conText.Last(); mCharacters[char_to_list_index(ps.Character)].Events.add_event(jiggleDelegate, 0); ManagerManager.Manager.mMusicManager.play_sound_effect("graveShine"); } }, gFirstConnectionText - gPreParticle).then( delegate(float aTime){ if (npo.IsDestroyed || aTime > gConnectionText) { npo = null; return(true); } return(false); } ); } } } } string deathSentence = ""; if (aStats[aStats.Count - 1].DeathTime != -1) { deathSentence += GameStrings.GetString("SM2"); } else { deathSentence += GameStrings.GetString("SM3"); } if (!aStats [aStats.Count - 1].Character.IsDescriptionAdjective) { if ("aeiouAEIOU".IndexOf(aStats[aStats.Count - 1].Character.Description[0]) >= 0) { deathSentence += GameStrings.GetString("SM4"); } else { deathSentence += GameStrings.GetString("SM5"); } } deathSentence += aStats[aStats.Count - 1].Character.Description; chain = chain.then( low_skippable_text_bubble_event(deathSentence, gIntroText) , 1); foreach (CharacterIndex e in CharacterIndex.sAllCharacters) { UnlockRequirements.UnlockData unlockData; if (mManager.mMetaManager.UnlockManager.unlockedThisGame.TryGetValue(new UnlockRequirements.FakeCharIndex(e), out unlockData)) { if (unlockData != null) { ManagerManager.Log("announcing unlock " + e.StringIdentifier); CharacterIndex ce = new CharacterIndex(e); chain = chain.then_one_shot( delegate(){ mUnlockAnnouncer.announce_unlock(ce, unlockData); } , 0).then( delegate(float aTime){ return(!mUnlockAnnouncer.IsAnnouncing); } , 0); } } } //so we don't announce unlock again when we restart mManager.mMetaManager.UnlockManager.unlockedThisGame.Clear(); if (GameConstants.showReward && aStats[aStats.Count - 1].Character.LevelIndex >= 7) { FlatElementImage rewardImage = null; FlatElementImage rewardFrame = null; mModeNormalPlay.mGiftManager.set_background_for_render(); chain = chain.then_one_shot( delegate() { var frameImg = mManager.mCharacterBundleManager.get_image("GIFT_frame"); rewardFrame = new FlatElementImage(frameImg.Image, frameImg.Data.Size, 24); rewardImage = new FlatElementImage(mModeNormalPlay.mGiftManager.render_gift(0), new Vector2(2001, 1128), 23); //TODO play sound effect rewardImage.HardPosition = 
mFlatCamera.get_point(0, 3); rewardFrame.HardPosition = rewardImage.HardPosition; rewardImage.SoftPosition = mFlatCamera.get_point(Vector3.zero) + new Vector3(0, 150, 0); rewardFrame.SoftPosition = rewardImage.SoftPosition + new Vector3(0, 70, 0); mElement.Add(rewardImage); mElement.Add(rewardFrame); graveCleanup.Add(rewardImage); graveCleanup.Add(rewardFrame); var subChain = TED.empty_chain().wait(4); if (mModeNormalPlay.mGiftManager.gift_count() > 0) { for (int i = 1; i < 100; i++) { int localIndex = i % mModeNormalPlay.mGiftManager.gift_count(); subChain = subChain.then_one_shot( delegate(){ //TODO sound effect mModeNormalPlay.mGiftManager.render_gift(localIndex); } , 1f); } } } , 2); //chain = chain.wait(6); //chain = chain.then(skippable_text_bubble_event("YOU ARE THE PERFECT WOMAN!",5,-0.8f,2),0); } //variables for credits animation.. float lastTime = 0; FlatElementImage[] logos = null; //PopupTextObject gameOver = null; List <FlatElementText> creditsText = new List <FlatElementText>(); float scrollSpeed = 820; mGraveCompleteCb = delegate() { Vector3 barYPosition = mFlatCamera.get_point(Vector3.zero) + new Vector3(0, -700, 0); TED.add_one_shot_event( delegate() { mManager.mMusicManager.fade_in_extra_music("creditsMusic"); mManager.mMusicManager.fade_out(); var imgData = mManager.mCharacterBundleManager.get_image("BAR"); var barImg = new FlatElementImage(imgData.Image, imgData.Data.Size, 24); barImg.HardPosition = barYPosition + new Vector3(0, -1000, 0); barImg.SoftPosition = barYPosition; mElement.Add(barImg); graveCleanup.Add(barImg); } , 0).then_one_shot( delegate() { float lastXPosition = mFlatCamera.get_point(Vector3.zero).x - mFlatCamera.Width / 2 - 100; int counter = 0; foreach (string e in GameConstants.credits) { string val = e; if (System.Text.RegularExpressions.Regex.IsMatch(e, @"^\d+$")) { val = GameStrings.GetString("GCcredits" + e); } var text = new FlatElementText(mManager.mNewRef.genericFont, 70, val, 25); float textWidth = text.BoundingBox.width; text.HardColor = new Color(1, 1, 1, 1); text.HardPosition = new Vector3(lastXPosition - textWidth / 2f, barYPosition.y, 0); lastXPosition += -textWidth - 75; creditsText.Add(text); mElement.Add(text); graveCleanup.Add(text); counter++; } if (GameConstants.SHOW_LOGOS) { logos = new FlatElementImage[3]; lastXPosition += -200; string[] imageNames = new string[] { "LOGO_FA", "LOGO_AI", "LOGO_GL" }; for (int i = 0; i < imageNames.Length; i++) { var imgData = mManager.mCharacterBundleManager.get_image(imageNames[i]); var img = new FlatElementImage(imgData.Image, imgData.Data.Size, 25); float imgWidth = img.BoundingBox.width; img.HardPosition = new Vector3(lastXPosition - imgWidth / 2, barYPosition.y, 0); lastXPosition += -img.BoundingBox.width / 2f - 500; logos[i] = img; mElement.Add(img); graveCleanup.Add(img); } } } , 1).then_one_shot( delegate() { /* this will fade everything out super slowly * List<FlatElementBase> graveItems = new List<FlatElementBase>(){finalScoreText,perfectPercent}; * foreach(FlatElementBase e in * mCharacters.Cast<FlatElementBase>() * .Concat(mDiffLabels.Cast<FlatElementBase>()) * .Concat(mScoreLabels.Cast<FlatElementBase>()) * .Concat(mScoreTexts.Cast<FlatElementBase>()) * .Concat(graveItems.Cast<FlatElementBase>())) * { * e.ColorInterpolationLimit = 0.05f; * e.SoftColor = GameConstants.UiWhiteTransparent; * }*/ } , 0).then( delegate(float aTime) { //scroll contents down Vector3 scroll = new Vector3(scrollSpeed * (aTime - lastTime), 0, 0); foreach (FlatElementText e in creditsText) { e.SoftPosition = 
e.SoftPosition + scroll; } if (logos != null) { foreach (FlatElementImage e in logos) { e.SoftPosition = e.SoftPosition + scroll; } } lastTime = aTime; if (Input.GetKeyDown(KeyCode.Alpha0)) { return(true); } if (aTime > gRestart) { return(true); } return(false); } , 0).then_one_shot( delegate(){ mManager.mMusicManager.fade_out_extra_music(); mManager.restart_game(); } , 0); mGraveCompleteChain = TED.LastEventKeyAdded; }; chain = chain.then_one_shot( delegate() { mGraveCompleteCb(); mGraveCompleteCb = null; mGraveChain = null; } , 1); mGraveChain = TED.LastEventKeyAdded; }
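set_for_GRAVE sequences the entire grave cutscene by chaining timed delegates on a TimedEventDistributor chain: then_one_shot fires a callback once after a delay, then keeps ticking a delegate with the elapsed time until it returns true, and wait pauses. The distributor itself is not part of this excerpt, so the sketch below is a minimal stand-in for that chaining idea, not the game's implementation.

using System;
using System.Collections.Generic;

// Minimal stand-in for the chain-of-timed-delegates pattern used by set_for_GRAVE.
// Not the game's TimedEventDistributor; it only shows the then / then_one_shot / wait shape.
public class EventChain
{
    private class Step
    {
        public float Delay;            // seconds to wait before the step starts ticking
        public Func<float, bool> Tick; // receives time since the step started; true means done
    }

    private readonly Queue<Step> _steps = new Queue<Step>();
    private float _delayLeft;
    private float _stepTime;
    private bool _stepStarted;

    public EventChain Then(Func<float, bool> tick, float delay)
    {
        _steps.Enqueue(new Step { Delay = delay, Tick = tick });
        return this;
    }

    // A one-shot is a step that completes on its first tick.
    public EventChain ThenOneShot(Action action, float delay)
    {
        return Then(t => { action(); return true; }, delay);
    }

    public EventChain Wait(float seconds)
    {
        return Then(t => t >= seconds, 0f);
    }

    // Call once per frame with the frame's delta time.
    public void Update(float dt)
    {
        if (_steps.Count == 0) return;
        var step = _steps.Peek();
        if (!_stepStarted) { _delayLeft = step.Delay; _stepTime = 0f; _stepStarted = true; }
        if (_delayLeft > 0f) { _delayLeft -= dt; return; }
        _stepTime += dt;
        if (step.Tick(_stepTime)) { _steps.Dequeue(); _stepStarted = false; }
    }
}

A cutscene built this way reads much like the code above, e.g. chain.ThenOneShot(ShowNextScore, 0.03f).Then(t => t > 0.2f, 0f).Wait(2.5f), with Update driven from the game loop (ShowNextScore being whatever callback the scene needs).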
private void button1_Click(object sender, EventArgs e) { Console.WriteLine("******"); System.Diagnostics.Stopwatch sw = new System.Diagnostics.Stopwatch(); sw.Start(); PerformanceStats stats = new PerformanceStats(); for (int i = 0; i < (int)stepsUpDown.Value; i++) { stats = world.stepAgent(actionTextBox.Text); //trajWriter.WriteLine(string.Join(",", ((Agent<int[], int[]>)agent).state)); label1.Text = i.ToString(); label1.Refresh(); chart1.Series.Last().Points.AddY(stats.cumulativeReward); chart2.Series.Last().Points.AddY(stats.modelAccesses + stats.modelUpdates); writer.WriteLine("Reward: " + stats.cumulativeReward); if (displayCheckBox.Checked) { pictureBox1.Image = world.showState(pictureBox1.Width, pictureBox1.Height, true); pictureBox1.Refresh(); Thread.Sleep(20); if (saveImages) { pictureBox1.Image.Save(saveFolder + numSavedImages.ToString() + ".bmp"); numSavedImages++; } } } sw.Stop(); chart3.Series.Last().Points.Clear(); foreach (double d in stats.stepsToGoal) { if (d <= 0) { break; } chart3.Series.Last().Points.Add(d); } label1.Text = Math.Round(sw.Elapsed.TotalSeconds, 1) + "s"; pictureBox1.Image = world.showState(pictureBox1.Width, pictureBox1.Height, true); //System.IO.StreamReader r = new System.IO.StreamReader("log.txt"); //string text = r.ReadLine(); //if (text==null || (text.IndexOf("null")!=-1)) // pictureBox1.Image = world.showState(pictureBox1.Width, pictureBox1.Height); //else //{ // int start = text.IndexOf("Level ") + 6; // string goalLevelString = text.Substring(start, 1); // int goalLevel = Convert.ToInt32(goalLevelString); // start = text.IndexOf("at ") + 3; // string[] goalString = text.Substring(start).Split(','); // int[] goal = new int[2]; // goal[0] = Convert.ToInt32(goalString[0]); // goal[1] = Convert.ToInt32(goalString[1]); // pictureBox1.Image = world.showState(pictureBox1.Width, pictureBox1.Height, true); //} //r.Close(); // chart cumulative reward //chart1.Series.Last().Points.Clear(); //for (int i = 0; i < world.agent.cumulativeReward.Count; i++) //{ // chart1.Series.Last().Points.AddXY(i, world.agent.cumulativeReward[i]); //} writer.Flush(); //writer.Close(); }
public void ResetStats() { stats = new PerformanceStats(); }
static void Main(string[] args) { //MultiResolutionRL.StateManagement.learnedStateTree tree = new MultiResolutionRL.StateManagement.learnedStateTree(); //IntArrayComparer comparer = new IntArrayComparer(); //System.IO.StreamReader rdr = new System.IO.StreamReader("C:\\Users\\Eric\\Google Drive\\Lethbridge Projects\\Fuzzy Place Field Test\\parents1.csv"); //rdr.ReadLine(); //string thisline; //while ((thisline = rdr.ReadLine()) != null) //{ // string[] elements = thisline.Split(','); // int[] thisState = new int[2] { Convert.ToInt32(elements[0]), Convert.ToInt32(elements[1]) }; // int[] parent = tree.GetParentState(thisState, 3); // if (comparer.Equals(parent, new int[2] { 3, 5 })) // { // int a = 0; // } // List<int[]> children = tree.GetLevel0Children(parent, 3); //} //// task-switch study //int runs = 48; //int goalCt = 10; //List<double>[] stepsToGoal = new List<double>[runs]; //List<double>[] cumModelUse = new List<double>[runs]; //System.Diagnostics.Stopwatch sw = new System.Diagnostics.Stopwatch(); //sw.Start(); //ParallelOptions op = new ParallelOptions() //{ // MaxDegreeOfParallelism = 8 //}; //Parallel.For(0, runs, op, (run) => //{ // cumModelUse[run] = new List<double>(); // // instantiate world // World thisWorld = new GridWorld(); // // load 1st map // thisWorld.Load("C:\\Users\\Eric\\Google Drive\\Lethbridge Projects\\map10.bmp"); // // add agent // System.Threading.Thread.Sleep(run * 100); // staggered instantiation to avoid identical random number generators // //thisWorld.addAgent(typeof(EGreedyPolicy<,>), typeof(MultiGridWorldModel<,>), 8); // thisWorld.addAgent(typeof(EGreedyPolicy<,>), typeof(ModelBasedValue<,>)); // // run // PerformanceStats stats = new PerformanceStats(); // while (stats.stepsToGoal.Count <= goalCt) // { // stats = thisWorld.stepAgent(""); // if (stats.stepsToGoal.Last() == 0) // { // cumModelUse[run].Add(stats.modelAccesses + stats.modelUpdates); // Console.WriteLine("run " + run.ToString() + " goal count: " + stats.stepsToGoal.Count); // } // } // // switch task // thisWorld.Load("C:\\Users\\Eric\\Google Drive\\Lethbridge Projects\\map10e.bmp"); // // run again // while (stats.stepsToGoal.Count <= goalCt * 2) // { // stats = thisWorld.stepAgent(""); // if (stats.stepsToGoal.Last() == 0) // { // cumModelUse[run].Add(stats.modelAccesses + stats.modelUpdates); // Console.WriteLine("run " + run.ToString() + " goal count: " + stats.stepsToGoal.Count); // } // } // stepsToGoal[run] = stats.stepsToGoal; //}); //System.IO.StreamWriter writer = new System.IO.StreamWriter("C:\\Users\\Eric\\Google Drive\\Lethbridge Projects\\stepsToGoal.csv"); //for (int i = 0; i < stepsToGoal[0].Count; i++) //{ // List<string> line = new List<string>(); // foreach (List<double> series in stepsToGoal) // { // line.Add(series[i].ToString()); // } // writer.WriteLine(string.Join(",", line)); //} //writer.Flush(); //writer.Close(); //writer = new System.IO.StreamWriter("C:\\Users\\Eric\\Google Drive\\Lethbridge Projects\\modelUse.csv"); //for (int i = 0; i < cumModelUse[0].Count; i++) //{ // List<string> line = new List<string>(); // foreach (List<double> series in cumModelUse) // { // line.Add(series[i].ToString()); // } // writer.WriteLine(string.Join(",", line)); //} //writer.Flush(); //writer.Close(); // stochastic reward study int runs = 48; int goalCt = 100; List <double>[] stepsToGoal = new List <double> [runs]; List <double>[] cumModelUse = new List <double> [runs]; System.Diagnostics.Stopwatch sw = new System.Diagnostics.Stopwatch(); sw.Start(); ParallelOptions op = new 
ParallelOptions() { MaxDegreeOfParallelism = 8 }; Parallel.For(0, runs, op, (run) => { cumModelUse[run] = new List <double>(); // instantiate world World thisWorld = new StochasticRewardGridWorld(); // load map thisWorld.Load("C:\\Users\\Eric\\Google Drive\\Lethbridge Projects\\map4choiceB.bmp"); // add agent System.Threading.Thread.Sleep(run * 100); // staggered instantiation to avoid identical random number generators thisWorld.addAgent(typeof(EGreedyPolicy <,>), typeof(ModelBasedValue <,>)); //thisWorld.addAgent(typeof(EGreedyPolicy<,>), typeof(ContextSwitchValue<,>), 8, 100); // run PerformanceStats stats = new PerformanceStats(); while (stats.stepsToGoal.Count <= goalCt) { stats = thisWorld.stepAgent(""); if (stats.stepsToGoal.Last() == 0) { cumModelUse[run].Add(stats.modelAccesses + stats.modelUpdates); Console.WriteLine("run " + run.ToString() + " goal count: " + stats.stepsToGoal.Count); } } stepsToGoal[run] = stats.stepsToGoal; }); System.IO.StreamWriter writer = new System.IO.StreamWriter("C:\\Users\\Eric\\Google Drive\\Lethbridge Projects\\stepsToGoalStochasticMBRL.csv"); for (int i = 0; i < stepsToGoal[0].Count; i++) { List <string> line = new List <string>(); foreach (List <double> series in stepsToGoal) { line.Add(series[i].ToString()); } writer.WriteLine(string.Join(",", line)); } writer.Flush(); writer.Close(); writer = new System.IO.StreamWriter("C:\\Users\\Eric\\Google Drive\\Lethbridge Projects\\modelUseStochasticMBRL.csv"); for (int i = 0; i < cumModelUse[0].Count; i++) { List <string> line = new List <string>(); foreach (List <double> series in cumModelUse) { line.Add(series[i].ToString()); } writer.WriteLine(string.Join(",", line)); } writer.Flush(); writer.Close(); //// Lesion study //int runs = 8; //int goalCt = 25; //List<double>[] results = new List<double>[runs]; //System.Diagnostics.Stopwatch sw = new System.Diagnostics.Stopwatch(); //sw.Start(); //ParallelOptions op = new ParallelOptions() //{ // MaxDegreeOfParallelism = 8 //}; //Parallel.For(0, runs, op, (run) => ////for (int run = 0; run < runs; run++) //{ // // instantiate world // World thisWorld = new GridWorld(); // // load map // thisWorld.Load("C:\\Users\\Eric\\Google Drive\\Lethbridge Projects\\map3LargeMod.bmp"); // // load agent // System.Threading.Thread.Sleep(run * 100); // staggered instantiation to avoid identical random number generators // //thisWorld.addAgent(typeof(SoftmaxPolicy<,>), typeof(MultiGridWorldModel<,>), 8, 4); // thisWorld.addAgent(typeof(EGreedyPolicy<,>), typeof(MultiResValue<,>), 1, 0); // // run // PerformanceStats stats = new PerformanceStats(); // while (stats.stepsToGoal.Count <= goalCt) // { // stats = thisWorld.stepAgent(""); // } // results[run] = stats.stepsToGoal; //}); //sw.Stop(); //Console.WriteLine(sw.Elapsed.TotalSeconds.ToString()); //System.IO.StreamWriter writer = new System.IO.StreamWriter("C:\\Users\\Eric\\Google Drive\\Lethbridge Projects\\data.csv"); //for (int i = 0; i < goalCt; i++) //{ // List<string> line = new List<string>(); // foreach (List<double> series in results) // { // line.Add(series[i].ToString()); // } // writer.WriteLine(string.Join(",", line)); //} //writer.Flush(); //writer.Close(); //// Post-training Lesion study //int runs = 7; //int goalCt = 2; //List<double>[] results = new List<double>[runs]; //System.Diagnostics.Stopwatch sw = new System.Diagnostics.Stopwatch(); //sw.Start(); //Parallel.For(0, runs, (run) => ////for (int run = 0; run < runs; run++) //{ // // instantiate world // World thisWorld = new GridWorld(); // // load map // 
thisWorld.Load("C:\\Users\\Eric\\Google Drive\\Lethbridge Projects\\map3.bmp"); // // load agent // System.Threading.Thread.Sleep(run * 100); // staggered instantiation to avoid identical random number generators // //thisWorld.addAgent(typeof(SoftMaxPolicy<,>), typeof(MultiGridWorldModel<,>), 8, 4); // Agent<int[], int[]> agent = (Agent<int[], int[]>)thisWorld.addAgent(typeof(EGreedyPolicy<,>), typeof(MultiGridWorldModel<,>), 8, 4); // // run // PerformanceStats stats = new PerformanceStats(); // while (stats.stepsToGoal.Count <= goalCt) // { // stats = thisWorld.stepAgent(""); // } // // change environment // thisWorld.Load("C:\\Users\\Eric\\Google Drive\\Lethbridge Projects\\map3b.bmp"); // // lesion vH // MultiGridWorldModel<int[], int[]> model = (MultiGridWorldModel<int[], int[]>)agent._actionValue; // model.LesionVH(1); // // run // while (stats.stepsToGoal.Count <= goalCt * 2) // { // stats = thisWorld.stepAgent(""); // } // results[run] = stats.stepsToGoal; //}); //sw.Stop(); //Console.WriteLine(sw.Elapsed.TotalSeconds.ToString()); //System.IO.StreamWriter writer = new System.IO.StreamWriter("C:\\Users\\Eric\\Google Drive\\Lethbridge Projects\\data.csv"); //for (int i = 0; i < goalCt; i++) //{ // List<string> line = new List<string>(); // foreach (List<double> series in results) // { // line.Add(series[i].ToString()); // } // writer.WriteLine(string.Join(",", line)); //} //writer.Flush(); //writer.Close(); }
public override void Remove(string[] keys, WorkContext context) { DeletionBatchInfo deletionBatchInfo = null; try { deletionBatchInfo = context.ReportDeletionBatchStarted(PublicName, keys.Length); context.TransactionalStorage.Batch(actions => { var storageCommitDuration = new Stopwatch(); actions.BeforeStorageCommit += storageCommitDuration.Start; actions.AfterStorageCommit += () => { storageCommitDuration.Stop(); deletionBatchInfo.PerformanceStats.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds)); }; var reduceKeyAndBuckets = new Dictionary <ReduceKeyAndBucket, int>(); var deleteMappedResultsDuration = new Stopwatch(); using (StopwatchScope.For(deleteMappedResultsDuration)) { if (actions.MapReduce.HasMappedResultsForIndex(indexId)) { foreach (var key in keys) { actions.MapReduce.DeleteMappedResultsForDocumentId(key, indexId, reduceKeyAndBuckets); context.CancellationToken.ThrowIfCancellationRequested(); } } } deletionBatchInfo.PerformanceStats.Add(PerformanceStats.From(IndexingOperation.Delete_DeleteMappedResultsForDocumentId, deleteMappedResultsDuration.ElapsedMilliseconds)); actions.MapReduce.UpdateRemovedMapReduceStats(indexId, reduceKeyAndBuckets, context.CancellationToken); var scheduleReductionsDuration = new Stopwatch(); using (StopwatchScope.For(scheduleReductionsDuration)) { foreach (var reduceKeyAndBucket in reduceKeyAndBuckets) { actions.MapReduce.ScheduleReductions(indexId, 0, reduceKeyAndBucket.Key); context.CancellationToken.ThrowIfCancellationRequested(); } } deletionBatchInfo.PerformanceStats.Add(PerformanceStats.From(IndexingOperation.Reduce_ScheduleReductions, scheduleReductionsDuration.ElapsedMilliseconds)); }); } finally { if (deletionBatchInfo != null) { context.ReportDeletionBatchCompleted(deletionBatchInfo); } } }
protected ReducingPerformanceStats[] HandleReduceForIndex(IndexToWorkOn indexToWorkOn) { var viewGenerator = context.IndexDefinitionStorage.GetViewGenerator(indexToWorkOn.IndexId); if (viewGenerator == null) { return(null); } bool operationCanceled = false; var itemsToDelete = new ConcurrentSet <object>(); IList <ReduceTypePerKey> mappedResultsInfo = null; transactionalStorage.Batch(actions => { mappedResultsInfo = actions.MapReduce.GetReduceTypesPerKeys(indexToWorkOn.IndexId, context.CurrentNumberOfItemsToReduceInSingleBatch, context.NumberOfItemsToExecuteReduceInSingleStep).ToList(); }); var singleStepReduceKeys = mappedResultsInfo.Where(x => x.OperationTypeToPerform == ReduceType.SingleStep).Select(x => x.ReduceKey).ToArray(); var multiStepsReduceKeys = mappedResultsInfo.Where(x => x.OperationTypeToPerform == ReduceType.MultiStep).Select(x => x.ReduceKey).ToArray(); currentlyProcessedIndexes.TryAdd(indexToWorkOn.IndexId, indexToWorkOn.Index); var performanceStats = new List <ReducingPerformanceStats>(); try { if (singleStepReduceKeys.Length > 0) { Log.Debug("SingleStep reduce for keys: {0}", singleStepReduceKeys.Select(x => x + ",")); var singleStepStats = SingleStepReduce(indexToWorkOn, singleStepReduceKeys, viewGenerator, itemsToDelete); performanceStats.Add(singleStepStats); } if (multiStepsReduceKeys.Length > 0) { Log.Debug("MultiStep reduce for keys: {0}", multiStepsReduceKeys.Select(x => x + ",")); var multiStepStats = MultiStepReduce(indexToWorkOn, multiStepsReduceKeys, viewGenerator, itemsToDelete); performanceStats.Add(multiStepStats); } } catch (OperationCanceledException) { operationCanceled = true; } finally { var postReducingOperations = new ReduceLevelPeformanceStats { Level = -1, Started = SystemTime.UtcNow }; if (operationCanceled == false) { var deletingScheduledReductionsDuration = new Stopwatch(); var storageCommitDuration = new Stopwatch(); // whatever we succeeded in indexing or not, we have to update this // because otherwise we keep trying to re-index failed mapped results transactionalStorage.Batch(actions => { actions.BeforeStorageCommit += storageCommitDuration.Start; actions.AfterStorageCommit += storageCommitDuration.Stop; ScheduledReductionInfo latest; using (StopwatchScope.For(deletingScheduledReductionsDuration)) { latest = actions.MapReduce.DeleteScheduledReduction(itemsToDelete); } if (latest == null) { return; } actions.Indexing.UpdateLastReduced(indexToWorkOn.Index.indexId, latest.Etag, latest.Timestamp); }); postReducingOperations.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_DeleteScheduledReductions, deletingScheduledReductionsDuration.ElapsedMilliseconds)); postReducingOperations.Operations.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds)); } postReducingOperations.Completed = SystemTime.UtcNow; postReducingOperations.Duration = postReducingOperations.Completed - postReducingOperations.Started; performanceStats.Add(new ReducingPerformanceStats(ReduceType.None) { LevelStats = new List <ReduceLevelPeformanceStats> { postReducingOperations } }); Index _; currentlyProcessedIndexes.TryRemove(indexToWorkOn.IndexId, out _); } return(performanceStats.ToArray()); }
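The overload at the start of this section extends the cancellation handling seen in this older version: besides catching OperationCanceledException directly, it treats an AggregateException as a cancellation only when the aggregate actually contains an OperationCanceledException, and rethrows otherwise. A self-contained sketch of that unwrapping is below; the cancelled task is just a placeholder for the reduce work.

using System;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;

// Sketch of the AggregateException unwrapping used by the newer HandleReduceForIndex:
// swallow the exception as "operation canceled" only if it carries an
// OperationCanceledException; anything else propagates.
class CancellationUnwrapDemo
{
    static void Main()
    {
        bool operationCanceled = false;
        using (var cts = new CancellationTokenSource())
        {
            cts.Cancel();
            try
            {
                // Placeholder for the reduce work; WaitAll surfaces the cancellation
                // as an AggregateException wrapping a TaskCanceledException.
                Task.WaitAll(Task.Run(() => cts.Token.ThrowIfCancellationRequested(), cts.Token));
            }
            catch (OperationCanceledException)
            {
                operationCanceled = true;
            }
            catch (AggregateException e)
            {
                if (e.InnerExceptions.OfType<OperationCanceledException>().Any() == false)
                {
                    throw;
                }
                operationCanceled = true;
            }
        }
        Console.WriteLine("operation canceled: " + operationCanceled);
    }
}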