Example #1
    public async Task TestTimeoutEvenWhenServerHangs()
    {
        await using var harness = await new MiniKuduClusterBuilder()
                                  .AddTabletServerFlag("--scanner_inject_latency_on_each_batch_ms=200000")
                                  .BuildHarnessAsync();

        await using var client = harness.CreateClient();

        var tableBuilder = ClientTestUtil.GetBasicSchema()
                           .SetTableName(nameof(TestTimeoutEvenWhenServerHangs));

        var table = await client.CreateTableAsync(tableBuilder);

        var row = ClientTestUtil.CreateBasicSchemaInsert(table, 1);
        await client.WriteAsync(new[] { row });

        var scanner = client.NewScanBuilder(table).Build();

        // Scan with a short timeout.
        var timeout = TimeSpan.FromSeconds(1);

        using var cts = new CancellationTokenSource(timeout);

        // The server will not respond for the lifetime of the test, so we
        // expect the operation to time out.
        await Assert.ThrowsAsync<OperationCanceledException>(
            async () => await scanner.CountAsync(cts.Token));
    }
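
The same cancellation pattern applies to any client call that accepts a CancellationToken. A minimal sketch, using only the scanner API exercised above; the TimeoutException translation is illustrative rather than part of the client:

    private static async Task<long> CountWithTimeoutAsync(
        KuduScanner scanner, TimeSpan timeout)
    {
        using var cts = new CancellationTokenSource(timeout);

        try
        {
            return await scanner.CountAsync(cts.Token);
        }
        catch (OperationCanceledException)
        {
            // The server did not produce a result within the timeout.
            throw new TimeoutException($"Scan did not complete within {timeout}.");
        }
    }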
    public async Task TestSubmitWriteOpAfterCommit()
    {
        await using var harness = await new MiniKuduClusterBuilder()
                                  .AddMasterServerFlag("--txn_manager_enabled")
                                  .AddTabletServerFlag("--enable_txn_system_client_init=true")
                                  .BuildHarnessAsync();

        await using var client = harness.CreateClient();

        var builder = ClientTestUtil.GetBasicSchema()
                      .SetTableName(nameof(TestSubmitWriteOpAfterCommit))
                      .AddHashPartitions(2, "key");

        var table = await client.CreateTableAsync(builder);

        using var transaction = await client.NewTransactionAsync();

        int key     = 0;
        var insert1 = ClientTestUtil.CreateBasicSchemaInsert(table, key++);
        await transaction.WriteAsync(new[] { insert1 });

        await transaction.CommitAsync();

        await transaction.WaitForCommitAsync();

        var insert2   = ClientTestUtil.CreateBasicSchemaInsert(table, key);
        var exception = await Assert.ThrowsAsync<NonRecoverableException>(
            async () => await transaction.WriteAsync(new[] { insert2 }));

        Assert.Matches(".* transaction ID .* not open: COMMITTED", exception.Message);
    }
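
For contrast, a minimal sketch of the intended lifecycle using only the calls exercised above: every write must land while the transaction is open, and WaitForCommitAsync() blocks until the commit is finalized. The KuduClient parameter type is assumed from context:

    private static async Task WriteInTransactionAsync(
        KuduClient client, KuduTable table, int key)
    {
        using var transaction = await client.NewTransactionAsync();

        // Writes are only accepted while the transaction is still open.
        var insert = ClientTestUtil.CreateBasicSchemaInsert(table, key);
        await transaction.WriteAsync(new[] { insert });

        // After CommitAsync() the transaction is no longer open; any further
        // WriteAsync() on it fails with NonRecoverableException, as tested above.
        await transaction.CommitAsync();
        await transaction.WaitForCommitAsync();
    }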
Example #3
    private async Task<long> SetupTableForDiffScansAsync(KuduTable table, int numRows)
    {
        for (int i = 0; i < numRows / 2; i++)
        {
            var row = ClientTestUtil.CreateBasicSchemaInsert(table, i);
            await _session.EnqueueAsync(row);
        }

        await _session.FlushAsync();

        // Grab the timestamp, then add more data so there's a diff.
        long timestamp = _client.LastPropagatedTimestamp;

        for (int i = numRows / 2; i < numRows; i++)
        {
            var row = ClientTestUtil.CreateBasicSchemaInsert(table, i);
            await _session.EnqueueAsync(row);
        }

        await _session.FlushAsync();

        // Delete some data so the is_deleted column can be tested.
        for (int i = 0; i < numRows / 4; i++)
        {
            var row = table.NewDelete();
            row.SetInt32(0, i);
            await _session.EnqueueAsync(row);
        }

        await _session.FlushAsync();

        return timestamp;
    }
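
A hypothetical usage sketch for this helper: after it runs, numRows rows have been inserted and the first numRows / 4 deleted, so a plain scan should see the remainder. The verification method is illustrative, not part of the original suite:

    private async Task VerifyDiffScanSetupAsync(KuduTable table, int numRows)
    {
        // The returned timestamp falls between the two insert batches, so a
        // diff scan anchored there would cover the second batch of inserts
        // plus the deletes.
        long timestamp = await SetupTableForDiffScansAsync(table, numRows);

        long liveRows = await ClientTestUtil.CountRowsAsync(_client, table);
        Assert.Equal(numRows - numRows / 4, liveRows);
    }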
Example #4
    public async Task TestInsertAfterInsertIgnoreHasRowError()
    {
        var builder = ClientTestUtil.GetBasicSchema()
                      .SetTableName(nameof(TestInsertAfterInsertIgnoreHasRowError));

        var table = await _client.CreateTableAsync(builder);

        var rows = new[]
        {
            ClientTestUtil.CreateBasicSchemaInsertIgnore(table, 1),
            ClientTestUtil.CreateBasicSchemaInsert(table, 1)
        };

        var exception = await Assert.ThrowsAsync<KuduWriteException>(
            () => _client.WriteAsync(rows));

        var rowError = Assert.Single(exception.PerRowErrors);

        Assert.True(rowError.IsAlreadyPresent);

        var rowStrings = await ClientTestUtil.ScanTableToStringsAsync(_client, table);

        var rowString = Assert.Single(rowStrings);

        Assert.Equal(
            "INT32 key=1, INT32 column1_i=2, INT32 column2_i=3, " +
            "STRING column3_s=a string, BOOL column4_b=True", rowString);
    }
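
A minimal sketch of surfacing per-row failures from a batch write, relying on the KuduWriteException shape asserted above; the KuduOperation element type is an assumption:

    private static async Task WriteReportingRowErrorsAsync(
        KuduClient client, IEnumerable<KuduOperation> rows)
    {
        try
        {
            await client.WriteAsync(rows);
        }
        catch (KuduWriteException exception)
        {
            // PerRowErrors pinpoints which operations failed; IsAlreadyPresent
            // flags duplicate-key insert failures like the one above.
            foreach (var rowError in exception.PerRowErrors)
            {
                Console.WriteLine($"Row failed; already present: {rowError.IsAlreadyPresent}");
            }

            throw;
        }
    }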
Example #5
    public async Task TestCount()
    {
        await using var miniCluster = await new MiniKuduClusterBuilder().BuildAsync();
        await using var client      = miniCluster.CreateClient();

        var builder = ClientTestUtil.GetBasicSchema()
                      .SetTableName(nameof(TestCount))
                      .AddHashPartitions(4, "key");

        var table = await client.CreateTableAsync(builder);

        int numRows = 123;
        var rows    = Enumerable.Range(0, numRows)
                      .Select(i => ClientTestUtil.CreateBasicSchemaInsert(table, i));

        await client.WriteAsync(rows);

        var scanner = client.NewScanBuilder(table)
                      .SetEmptyProjection()
                      .SetReadMode(ReadMode.ReadYourWrites)
                      .Build();

        long numScannedRows = await scanner.CountAsync();

        Assert.Empty(scanner.ProjectionSchema.Columns);
        Assert.Equal(numRows, numScannedRows);
    }
Example #6
    public async Task TestUpdateIgnore()
    {
        var builder = ClientTestUtil.GetBasicSchema()
                      .SetTableName(nameof(TestUpdateIgnore));

        var table = await _client.CreateTableAsync(builder);

        // Test update ignore does not return a row error.
        var update1 = ClientTestUtil.CreateBasicSchemaUpdateIgnore(table, 1, 1, false);
        await _client.WriteAsync(new[] { update1 });

        Assert.Empty(await ClientTestUtil.ScanTableToStringsAsync(_client, table));

        var insert = ClientTestUtil.CreateBasicSchemaInsert(table, 1);
        await _client.WriteAsync(new[] { insert });

        Assert.Single(await ClientTestUtil.ScanTableToStringsAsync(_client, table));

        // Test update ignore implements normal update.
        var update2 = ClientTestUtil.CreateBasicSchemaUpdateIgnore(table, 1, 2, false);
        await _client.WriteAsync(new[] { update2 });

        var rowStrings = await ClientTestUtil.ScanTableToStringsAsync(_client, table);

        var rowString = Assert.Single(rowStrings);

        Assert.Equal(
            "INT32 key=1, INT32 column1_i=2, INT32 column2_i=3, " +
            "STRING column3_s=a string, BOOL column4_b=True", rowString);
    }
    private async Task WriteAsync(CancellationToken cancellationToken)
    {
        Exception sessionException = null;
        Exception exception;

        ValueTask HandleSessionExceptionAsync(SessionExceptionContext context)
        {
            Volatile.Write(ref sessionException, context.Exception);
            return new ValueTask();
        }

        var options = new KuduSessionOptions
        {
            ExceptionHandler = HandleSessionExceptionAsync
        };

        await using var session = _client.NewSession(options);

        int  currentRowKey = 0;
        bool flush         = false;

        while (true)
        {
            if (cancellationToken.IsCancellationRequested)
            {
                break;
            }

            exception = Volatile.Read(ref sessionException);
            if (exception != null)
            {
                throw exception;
            }

            var row = ClientTestUtil.CreateBasicSchemaInsert(_table, currentRowKey);
            await session.EnqueueAsync(row, CancellationToken.None);

            if (flush)
            {
                await session.FlushAsync(CancellationToken.None);
            }

            // Every 10 rows, randomly decide whether to flush after each enqueue.
            if (currentRowKey % 10 == 0)
            {
                flush = ThreadSafeRandom.Instance.NextBool();
            }

            currentRowKey++;
        }

        await session.FlushAsync(CancellationToken.None);

        exception = Volatile.Read(ref sessionException);
        if (exception != null)
        {
            throw exception;
        }
    }
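
A hypothetical driver for the writer loop above: let it run for a fixed duration, then cancel. Cancellation stops the enqueue loop; the helper then performs a final flush and rethrows anything captured by the session's exception handler:

    private async Task RunWriterForAsync(TimeSpan duration)
    {
        // The token fires after the given duration, ending the write loop.
        using var cts = new CancellationTokenSource(duration);

        await WriteAsync(cts.Token);
    }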
    public async Task TestTxnKeepaliveSwitchesToOtherTxnManager()
    {
        await using var harness = await new MiniKuduClusterBuilder()
                                  .AddMasterServerFlag("--txn_manager_enabled")
                                  // Set Raft heartbeat interval short for faster test runtime: speed up
                                  // leader failure detection and new leader election.
                                  .AddMasterServerFlag("--raft_heartbeat_interval_ms=100")
                                  // The txn keepalive interval should be long enough to accommodate Raft
                                  // leader failure detection and election.
                                  .AddTabletServerFlag("--txn_keepalive_interval_ms=1000")
                                  .AddTabletServerFlag("--txn_staleness_tracker_interval_ms=250")
                                  .AddTabletServerFlag("--enable_txn_system_client_init=true")
                                  .BuildHarnessAsync();

        await using var client = harness.CreateClient();

        var builder = ClientTestUtil.GetBasicSchema()
                      .SetTableName(nameof(TestTxnKeepaliveSwitchesToOtherTxnManager))
                      .AddHashPartitions(2, "key");

        var table = await client.CreateTableAsync(builder);

        using var transaction = await client.NewTransactionAsync();

        var insert = ClientTestUtil.CreateBasicSchemaInsert(table, 0);
        await transaction.WriteAsync(new[] { insert });

        await harness.KillLeaderMasterServerAsync();

        // Wait for two keepalive intervals to make sure the backend got a chance
        // to automatically abort the transaction if not receiving txn keepalive
        // messages.
        await Task.Delay(2 * 1000);

        // It should be possible to commit the transaction. This is to verify that
        //
        //   * the client eventually starts sending txn keepalive messages to
        //     another TxnManager instance (the original was hosted by the former
        //     leader master, which is no longer available), so the backend
        //     doesn't abort the transaction automatically due to not receiving
        //     keepalive messages
        //
        //   * the client switches to the new TxnManager for other txn-related
        //     operations as well
        await transaction.CommitAsync();

        await transaction.WaitForCommitAsync();

        // An extra sanity check: read back the rows written into the table in the
        // context of the transaction.
        var scanner = client.NewScanBuilder(table)
                      .SetReadMode(ReadMode.ReadYourWrites)
                      .SetReplicaSelection(ReplicaSelection.LeaderOnly)
                      .Build();

        Assert.Equal(1, await scanner.CountAsync());
    }
    public async Task TestPropagateTxnCommitTimestamp()
    {
        await using var harness = await new MiniKuduClusterBuilder()
                                  .AddMasterServerFlag("--txn_manager_enabled")
                                  // Inject latency to have a chance of spotting the transaction in the
                                  // FINALIZE_IN_PROGRESS state and to make KuduTransaction.WaitForCommitAsync()
                                  // poll multiple times.
                                  .AddTabletServerFlag("--txn_status_manager_inject_latency_finalize_commit_ms=250")
                                  .AddTabletServerFlag("--enable_txn_system_client_init=true")
                                  .BuildHarnessAsync();

        await using var client = harness.CreateClient();

        var builder = ClientTestUtil.GetBasicSchema()
                      .SetTableName(nameof(TestPropagateTxnCommitTimestamp))
                      .AddHashPartitions(8, "key");

        var table = await client.CreateTableAsync(builder);

        // Make sure the commit timestamp for a transaction is propagated to the
        // client upon committing a transaction.
        using (var transaction = await client.NewTransactionAsync())
        {
            // Insert many rows: the goal is to get at least one row inserted into
            // every tablet of the hash-partitioned test table, so every tablet would
            // be a participant in the transaction, and most likely every tablet
            // server would be involved.
            var inserts = Enumerable
                          .Range(0, 128)
                          .Select(key => ClientTestUtil.CreateBasicSchemaInsert(table, key));

            await transaction.WriteAsync(inserts);
            await CommitAndVerifyTransactionAsync(client, transaction);
        }

        // Make sure the commit timestamp for a transaction is propagated to the
        // client upon committing a transaction (using a session).
        using (var transaction = await client.NewTransactionAsync())
        {
            await using var session = transaction.NewSession();

            // Insert many rows: the goal is to get at least one row inserted into
            // every tablet of the hash-partitioned test table, so every tablet would
            // be a participant in the transaction, and most likely every tablet
            // server would be involved.
            for (int key = 128; key < 256; key++)
            {
                var insert = ClientTestUtil.CreateBasicSchemaInsert(table, key);
                await session.EnqueueAsync(insert);
            }

            await session.FlushAsync();
            await CommitAndVerifyTransactionAsync(client, transaction);
        }
    }
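
CommitAndVerifyTransactionAsync is referenced above but not shown. A plausible sketch, assuming the commit timestamp is propagated through the client's LastPropagatedTimestamp (the property read by the diff-scan setup elsewhere in this section); the original verification may differ:

    private static async Task CommitAndVerifyTransactionAsync(
        KuduClient client, KuduTransaction transaction)
    {
        long timestampBefore = client.LastPropagatedTimestamp;

        await transaction.CommitAsync();
        await transaction.WaitForCommitAsync();

        // A finalized commit should have advanced the propagated timestamp.
        Assert.True(client.LastPropagatedTimestamp > timestampBefore);
    }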
    private static async ValueTask InsertRowsAsync(
        IKuduSession session, KuduTable table, int startRow, int numRows)
    {
        var end = startRow + numRows;

        for (var i = startRow; i < end; i++)
        {
            var insert = ClientTestUtil.CreateBasicSchemaInsert(table, i);
            await session.EnqueueAsync(insert);
        }
    }
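
The helper only enqueues rows, so a usage sketch pairs it with a flush; nothing is guaranteed to reach the servers until the session is flushed:

    private static async Task InsertAndFlushAsync(
        IKuduSession session, KuduTable table, int numRows)
    {
        await InsertRowsAsync(session, table, 0, numRows);

        // Push all enqueued inserts to the tablet servers.
        await session.FlushAsync();
    }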
Example #11
    public async Task TestGetTableStatistics()
    {
        await using var harness = await new MiniKuduClusterBuilder()
                                  .AddTabletServerFlag("--update_tablet_stats_interval_ms=200")
                                  .AddTabletServerFlag("--heartbeat_interval_ms=100")
                                  .BuildHarnessAsync();

        await using var client = harness.CreateClient();

        // Create a table.
        var builder = ClientTestUtil.GetBasicSchema().SetTableName(_tableName);
        var table   = await client.CreateTableAsync(builder);

        // Insert some rows and test the statistics.
        var prevStatistics    = new KuduTableStatistics(-1, -1);
        var currentStatistics = new KuduTableStatistics(-1, -1);
        var session           = client.NewSession();
        int num = 100;

        for (int i = 0; i < num; ++i)
        {
            // Get current table statistics.
            currentStatistics = await client.GetTableStatisticsAsync(_tableName);

            Assert.True(currentStatistics.OnDiskSize >= prevStatistics.OnDiskSize);
            Assert.True(currentStatistics.LiveRowCount >= prevStatistics.LiveRowCount);
            Assert.True(currentStatistics.LiveRowCount <= i + 1);
            prevStatistics = currentStatistics;
            // Insert row.
            var insert = ClientTestUtil.CreateBasicSchemaInsert(table, i);
            await session.EnqueueAsync(insert);

            await session.FlushAsync();

            long numRows = await ClientTestUtil.CountRowsAsync(client, table);

            Assert.Equal(i + 1, numRows);
        }

        // Final accuracy test.
        // Wait for master to aggregate table statistics.
        await Task.Delay(200 * 6);

        currentStatistics = await client.GetTableStatisticsAsync(_tableName);

        Assert.True(currentStatistics.OnDiskSize >= prevStatistics.OnDiskSize);
        Assert.True(currentStatistics.LiveRowCount >= prevStatistics.LiveRowCount);
        Assert.Equal(num, currentStatistics.LiveRowCount);
    }
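
Statistics aggregation on the master is asynchronous; the test above sleeps a fixed 1200 ms. A hypothetical polling sketch that instead retries until the live row count converges, using only GetTableStatisticsAsync as shown above:

    private static async Task<KuduTableStatistics> WaitForLiveRowCountAsync(
        KuduClient client, string tableName, long expectedRows)
    {
        KuduTableStatistics statistics = null;

        // Poll a bounded number of times rather than sleeping once and hoping.
        for (int attempt = 0; attempt < 30; attempt++)
        {
            statistics = await client.GetTableStatisticsAsync(tableName);

            if (statistics.LiveRowCount == expectedRows)
            {
                break;
            }

            await Task.Delay(200);
        }

        return statistics;
    }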
Example #12
    public async Task TestSwitchToOtherTxnManagerInFlightCalls()
    {
        await using var harness = await new MiniKuduClusterBuilder()
                                  .AddMasterServerFlag("--txn_manager_enabled")
                                  // Set Raft heartbeat interval short for faster test runtime: speed up
                                  // leader failure detection and new leader election.
                                  .AddMasterServerFlag("--raft_heartbeat_interval_ms=100")
                                  .AddTabletServerFlag("--enable_txn_system_client_init=true")
                                  .BuildHarnessAsync();

        await using var client = harness.CreateClient();

        var builder = ClientTestUtil.GetBasicSchema()
                      .SetTableName(nameof(TestSwitchToOtherTxnManagerInFlightCalls))
                      .AddHashPartitions(2, "key");

        var table = await client.CreateTableAsync(builder);

        using var transaction = await client.NewTransactionAsync();

        var insert = ClientTestUtil.CreateBasicSchemaInsert(table, 0);
        await transaction.WriteAsync(new[] { insert });

        await harness.KillAllMasterServersAsync();

        var startMastersTask = Task.Run(async () =>
        {
            // Sleep for some time to allow the commit call below
            // to issue RPCs to TxnManagers that are not running.
            await Task.Delay(1000);
            await harness.StartAllMasterServersAsync();
        });

        // It should be possible to commit the transaction.
        await transaction.CommitAsync();

        await transaction.WaitForCommitAsync();

        await startMastersTask;

        // An extra sanity check: read back the rows written into the table in the
        // context of the transaction.
        var scanner = client.NewScanBuilder(table)
                      .SetReadMode(ReadMode.ReadYourWrites)
                      .SetReplicaSelection(ReplicaSelection.LeaderOnly)
                      .Build();

        Assert.Equal(1, await scanner.CountAsync());
    }
Example #13
    public async Task TestExceptionCallback()
    {
        int numCallbacks = 0;
        SessionExceptionContext sessionContext = null;

        var builder = ClientTestUtil.GetBasicSchema()
                      .SetTableName(nameof(TestExceptionCallback));

        var table = await _client.CreateTableAsync(builder);

        var row1 = ClientTestUtil.CreateBasicSchemaInsert(table, 1);
        var row2 = ClientTestUtil.CreateBasicSchemaInsert(table, 1);

        var sessionOptions = new KuduSessionOptions
        {
            ExceptionHandler = HandleSessionExceptionAsync
        };

        await using var session = _client.NewSession(sessionOptions);

        await session.EnqueueAsync(row1);

        await session.FlushAsync();

        await session.EnqueueAsync(row2);

        await session.FlushAsync();

        ValueTask HandleSessionExceptionAsync(SessionExceptionContext context)
        {
            numCallbacks++;
            sessionContext = context;
            return new ValueTask();
        }

        Assert.Equal(1, numCallbacks);

        var errorRow = Assert.Single(sessionContext.Rows);

        Assert.Same(row2, errorRow);

        var exception    = Assert.IsType<KuduWriteException>(sessionContext.Exception);
        var exceptionRow = Assert.Single(exception.PerRowErrors);

        Assert.True(exceptionRow.IsAlreadyPresent);
    }
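
The same ExceptionHandler hook generalizes to a reusable options factory. A minimal sketch; the console logging is illustrative:

    private static KuduSessionOptions CreateLoggingSessionOptions()
    {
        return new KuduSessionOptions
        {
            ExceptionHandler = context =>
            {
                // context.Rows carries the operations from the failed flush,
                // and context.Exception the underlying KuduWriteException.
                Console.WriteLine($"Session flush failed: {context.Exception.Message}");
                return new ValueTask();
            }
        };
    }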
Example #14
    public async Task TestScannerExpiration()
    {
        await using var miniCluster = await new MiniKuduClusterBuilder()
                                      .AddTabletServerFlag($"--scanner_ttl_ms={ShortScannerTtlMs}")
                                      .AddTabletServerFlag($"--scanner_gc_check_interval_us={ShortScannerGcUs}")
                                      .BuildAsync();
        await using var client = miniCluster.CreateClient();

        var builder = ClientTestUtil.GetBasicSchema()
                      .SetTableName("TestScannerExpiration")
                      .AddHashPartitions(2, "key");

        var table = await client.CreateTableAsync(builder);

        int numRows = 1000;
        var rows    = Enumerable.Range(0, numRows)
                      .Select(i => ClientTestUtil.CreateBasicSchemaInsert(table, i));

        await client.WriteAsync(rows);

        var scanner = client.NewScanBuilder(table)
                      .SetReadMode(ReadMode.ReadYourWrites)
                      .SetReplicaSelection(ReplicaSelection.ClosestReplica)
                      .SetBatchSizeBytes(100) // Use a small batch size so we get many batches.
                      .Build();

        var scanEnumerator = scanner.GetAsyncEnumerator();

        // Initialize the scanner and verify we can read rows.
        Assert.True(await scanEnumerator.MoveNextAsync());
        Assert.True(scanEnumerator.Current.Count > 0);

        // Wait for the scanner to time out.
        await Task.Delay(ShortScannerTtlMs * 2);

        var exception = await Assert.ThrowsAsync<NonRecoverableException>(
            async () => await scanEnumerator.MoveNextAsync());

        Assert.Matches(".*Scanner .* not found.*", exception.Message);

        // Closing an expired scanner shouldn't throw an exception.
        await scanEnumerator.DisposeAsync();
    }
    public async Task TestFailover(bool restart)
    {
        await using var harness = await new MiniKuduClusterBuilder()
                                  .NumMasters(3)
                                  .NumTservers(3)
                                  .BuildHarnessAsync();

        await using var client = harness.CreateClient();

        var builder = ClientTestUtil.GetBasicSchema()
                      .SetTableName("LeaderFailoverTest")
                      .CreateBasicRangePartition();
        var table = await client.CreateTableAsync(builder);

        var rows = Enumerable.Range(0, 3)
                   .Select(i => ClientTestUtil.CreateBasicSchemaInsert(table, i));

        await client.WriteAsync(rows);

        // Make sure the rows are in there before messing things up.
        long numRows = await ClientTestUtil.CountRowsAsync(client, table);

        Assert.Equal(3, numRows);

        if (restart)
        {
            await harness.RestartLeaderMasterAsync();
        }
        else
        {
            await harness.KillLeaderMasterServerAsync();
        }

        var rows2 = Enumerable.Range(3, 3)
                    .Select(i => ClientTestUtil.CreateBasicSchemaInsert(table, i));

        await client.WriteAsync(rows2);

        long numRows2 = await ClientTestUtil.CountRowsAsync(client, table);

        Assert.Equal(6, numRows2);
    }
Example #16
    public async Task TestTxnSessionClose()
    {
        await using var harness = await new MiniKuduClusterBuilder()
                                  .AddMasterServerFlag("--txn_manager_enabled")
                                  .AddTabletServerFlag("--enable_txn_system_client_init=true")
                                  .BuildHarnessAsync();

        await using var client = harness.CreateClient();

        var builder = ClientTestUtil.GetBasicSchema()
                      .SetTableName(nameof(TestTxnSessionClose))
                      .AddHashPartitions(2, "key");

        var table = await client.CreateTableAsync(builder);

        // Open and close an empty transaction session.
        using (var transaction = await client.NewTransactionAsync())
        {
            await using var session = transaction.NewSession();
        }

        // Open new transaction, insert one row for a session, close the session
        // and then rollback the transaction. No rows should be persisted.
        using (var transaction = await client.NewTransactionAsync())
        {
            await using var session = transaction.NewSession();

            var insert = ClientTestUtil.CreateBasicSchemaInsert(table, 1);
            await session.EnqueueAsync(insert);

            await session.FlushAsync();

            await transaction.RollbackAsync();

            var scanner = client.NewScanBuilder(table)
                          .SetReadMode(ReadMode.ReadYourWrites)
                          .SetReplicaSelection(ReplicaSelection.LeaderOnly)
                          .Build();

            Assert.Equal(0, await scanner.CountAsync());
        }
    }
Example #17
    public async Task TestDeleteIgnore()
    {
        var builder = ClientTestUtil.GetBasicSchema()
                      .SetTableName(nameof(TestDeleteIgnore));

        var table = await _client.CreateTableAsync(builder);

        // Test delete ignore does not return a row error.
        var delete = ClientTestUtil.CreateBasicSchemaDeleteIgnore(table, 1);
        await _client.WriteAsync(new[] { delete });

        var insert = ClientTestUtil.CreateBasicSchemaInsert(table, 1);
        await _client.WriteAsync(new[] { insert });

        Assert.Single(await ClientTestUtil.ScanTableToStringsAsync(_client, table));

        // Test delete ignore implements normal delete.
        await _client.WriteAsync(new[] { delete });

        Assert.Empty(await ClientTestUtil.ScanTableToStringsAsync(_client, table));
    }
Example #18
    public async Task TestInsertIgnoreAfterInsertHasNoRowError()
    {
        var builder = ClientTestUtil.GetBasicSchema()
                      .SetTableName(nameof(TestInsertIgnoreAfterInsertHasNoRowError));

        var table = await _client.CreateTableAsync(builder);

        var rows = new[]
        {
            ClientTestUtil.CreateBasicSchemaInsert(table, 1),
            ClientTestUtil.CreateBasicSchemaUpsert(table, 1, 1, false),
            ClientTestUtil.CreateBasicSchemaInsertIgnore(table, 1)
        };

        await _client.WriteAsync(rows);

        var rowStrings = await ClientTestUtil.ScanTableToStringsAsync(_client, table);

        var rowString = Assert.Single(rowStrings);

        Assert.Equal(
            "INT32 key=1, INT32 column1_i=1, INT32 column2_i=3, " +
            "STRING column3_s=a string, BOOL column4_b=True", rowString);
    }
Example #19
    public async Task TestKeepAlive()
    {
        await using var miniCluster = await new MiniKuduClusterBuilder()
                                      .AddTabletServerFlag($"--scanner_ttl_ms={ShortScannerTtlMs}")
                                      .AddTabletServerFlag($"--scanner_gc_check_interval_us={ShortScannerGcUs}")
                                      .BuildAsync();
        await using var client = miniCluster.CreateClient();

        var builder = ClientTestUtil.GetBasicSchema()
                      .SetTableName("TestKeepAlive")
                      .AddHashPartitions(2, "key");

        var table = await client.CreateTableAsync(builder);

        int numRows = 1000;
        var rows    = Enumerable.Range(0, numRows)
                      .Select(i => ClientTestUtil.CreateBasicSchemaInsert(table, i));

        await client.WriteAsync(rows);

        var scanner = client.NewScanBuilder(table)
                      .SetReadMode(ReadMode.ReadYourWrites)
                      .SetReplicaSelection(ReplicaSelection.LeaderOnly)
                      .SetBatchSizeBytes(100) // Use a small batch size so we get many batches.
                      .Build();

        await using var scanEnumerator = scanner.GetAsyncEnumerator();

        // KeepAlive on an uninitialized scanner should be ok.
        await scanEnumerator.KeepAliveAsync();

        // Get the first batch and initialize the scanner.
        Assert.True(await scanEnumerator.MoveNextAsync());
        long accum = scanEnumerator.Current.Count;

        while (await scanEnumerator.MoveNextAsync())
        {
            accum += scanEnumerator.Current.Count;

            // Break when we are between tablets.
            if (scanEnumerator.Tablet is null)
            {
                break;
            }

            // Ensure we actually end up between tablets.
            Assert.NotEqual(numRows, accum);
        }

        // We are between per-tablet scanners now; KeepAlive should still be ok.
        await scanEnumerator.KeepAliveAsync();

        // Initialize the next scanner or keepAlive will have no effect.
        Assert.True(await scanEnumerator.MoveNextAsync());
        accum += scanEnumerator.Current.Count;

        // Wait for longer than the scanner TTL, calling KeepAlive throughout.
        // Each iteration sleeps for 20% of the scanner TTL, and we loop 12 times
        // to ensure we extend well past 2x the scanner TTL.
        for (int i = 0; i < 12; i++)
        {
            await Task.Delay(ShortScannerTtlMs / 5);

            await scanEnumerator.KeepAliveAsync();
        }

        // Finish out the rows.
        while (await scanEnumerator.MoveNextAsync())
        {
            accum += scanEnumerator.Current.Count;
        }

        Assert.Equal(numRows, accum);

        // At this point the scanner is closed and there is nothing to keep alive.
        var exception = await Assert.ThrowsAsync<Exception>(
            async () => await scanEnumerator.KeepAliveAsync());

        Assert.Contains("Scanner has already been closed", exception.Message);
    }
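
A hypothetical consumer-side pattern built from the calls above: when processing each batch takes a large fraction of the scanner TTL, ping the server between batches so the open scanner is not garbage collected mid-scan:

    private static async Task<long> ConsumeSlowlyAsync(KuduScanner scanner)
    {
        long total = 0;

        await using var enumerator = scanner.GetAsyncEnumerator();

        while (await enumerator.MoveNextAsync())
        {
            total += enumerator.Current.Count;

            // Simulate expensive per-batch work, then extend the scanner's
            // lifetime before fetching the next batch.
            await Task.Delay(100);
            await enumerator.KeepAliveAsync();
        }

        return total;
    }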
Example #20
    public async Task TestScanTokensInterleavedRangePartitionDrops()
    {
        int numRows = 30;

        var builder = ClientTestUtil.GetBasicSchema()
                      .SetTableName(_tableName)
                      .AddHashPartitions(2, "key")
                      .CreateBasicRangePartition()
                      .AddRangePartition((lower, upper) =>
        {
            lower.SetInt32("key", 0);
            upper.SetInt32("key", numRows / 3);
        })
                      .AddRangePartition((lower, upper) =>
        {
            lower.SetInt32("key", numRows / 3);
            upper.SetInt32("key", 2 * numRows / 3);
        })
                      .AddRangePartition((lower, upper) =>
        {
            lower.SetInt32("key", 2 * numRows / 3);
            upper.SetInt32("key", numRows);
        });

        var table = await _client.CreateTableAsync(builder);

        for (int i = 0; i < numRows; i++)
        {
            var row = ClientTestUtil.CreateBasicSchemaInsert(table, i);
            await _session.EnqueueAsync(row);
        }

        await _session.FlushAsync();

        // Build the scan tokens.
        List<KuduScanToken> tokens = await _client.NewScanTokenBuilder(table)
                                     .BuildAsync();

        Assert.Equal(6, tokens.Count);

        // Drop the range partition [10, 20).
        await _client.AlterTableAsync(new AlterTableBuilder(table)
                                      .DropRangePartition((lower, upper) =>
        {
            lower.SetInt32("key", numRows / 3);
            upper.SetInt32("key", 2 * numRows / 3);
        }));

        // Rehydrate the tokens.
        var scanners = new List<KuduScanner>();

        foreach (var token in tokens)
        {
            var scanBuilder = await _client.NewScanBuilderFromTokenAsync(token);

            var scanner = scanBuilder.Build();

            scanners.Add(scanner);
        }

        // Drop the range partition [20, 30).
        await _client.AlterTableAsync(new AlterTableBuilder(table)
                                      .DropRangePartition((lower, upper) =>
        {
            lower.SetInt32("key", 2 * numRows / 3);
            upper.SetInt32("key", numRows);
        }));

        // Check the scanners work. The scanners for the tablets in the range
        // [10, 20) definitely won't see any rows. The scanners for the tablets
        // in the range [20, 30) might see rows.
        long scannedRows = 0;

        foreach (var scanner in scanners)
        {
            scannedRows += await scanner.CountAsync();
        }

        Assert.True(scannedRows >= numRows / 3);
        Assert.True(scannedRows <= 2 * numRows / 3);
    }
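
A minimal sketch of the token round trip used above, packaged as a helper: rehydrate each token into a scanner and sum the per-tablet row counts:

    private async Task<long> CountViaScanTokensAsync(IEnumerable<KuduScanToken> tokens)
    {
        long total = 0;

        foreach (var token in tokens)
        {
            // Rebuilding a scanner from a token may need a fresh metadata
            // lookup, hence the asynchronous builder call.
            var scanBuilder = await _client.NewScanBuilderFromTokenAsync(token);
            total += await scanBuilder.Build().CountAsync();
        }

        return total;
    }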
    public async Task TestMultipleFailover(bool restart)
    {
        int rowsPerIteration  = 3;
        int numIterations     = 10;
        int totalRowsToInsert = rowsPerIteration + numIterations * rowsPerIteration;

        await using var harness = await new MiniKuduClusterBuilder()
                                  .NumMasters(3)
                                  .NumTservers(3)
                                  .BuildHarnessAsync();

        await using var client = harness.CreateClient();

        var builder = ClientTestUtil.GetBasicSchema()
                      .SetTableName("MultipleLeaderFailoverTest");

        var table = await client.CreateTableAsync(builder);

        await using var session = client.NewSession();

        for (int i = 0; i < rowsPerIteration; i++)
        {
            var row = ClientTestUtil.CreateBasicSchemaInsert(table, i);
            await session.EnqueueAsync(row);
        }

        await session.FlushAsync();

        var rowCount = await ClientTestUtil.CountRowsAsync(client, table);

        Assert.Equal(rowsPerIteration, rowCount);

        int currentRows = rowsPerIteration;

        for (int i = 0; i < numIterations; i++)
        {
            var tablets = await client.GetTableLocationsAsync(
                table.TableId, Array.Empty<byte>(), 1);

            Assert.Single(tablets);

            if (restart)
            {
                await harness.RestartTabletServerAsync(tablets[0]);
            }
            else
            {
                await harness.KillTabletLeaderAsync(tablets[0]);
            }

            for (int j = 0; j < rowsPerIteration; j++)
            {
                var row = ClientTestUtil.CreateBasicSchemaInsert(table, currentRows);
                await session.EnqueueAsync(row);

                currentRows++;
            }

            await session.FlushAsync();

            if (!restart)
            {
                await harness.StartAllTabletServersAsync();
            }

            rowCount = await ClientTestUtil.CountRowsAsync(client, table);

            Assert.Equal(currentRows, rowCount);
        }

        rowCount = await ClientTestUtil.CountRowsAsync(client, table);

        Assert.Equal(totalRowsToInsert, rowCount);
    }
Example #22
    public async Task TestTxnKeepaliveRollingSwitchToOtherTxnManager()
    {
        await using var harness = await new MiniKuduClusterBuilder()
                                  .AddMasterServerFlag("--txn_manager_enabled")
                                  // Set Raft heartbeat interval short for faster test runtime: speed up
                                  // leader failure detection and new leader election.
                                  .AddMasterServerFlag("--raft_heartbeat_interval_ms=100")
                                  // The txn keepalive interval should be long enough to accommodate Raft
                                  // leader failure detection and election.
                                  .AddTabletServerFlag("--txn_keepalive_interval_ms=1000")
                                  .AddTabletServerFlag("--txn_staleness_tracker_interval_ms=250")
                                  .AddTabletServerFlag("--enable_txn_system_client_init=true")
                                  .BuildHarnessAsync();

        await using var client = harness.CreateClient();

        var builder = ClientTestUtil.GetBasicSchema()
                      .SetTableName(nameof(TestTxnKeepaliveRollingSwitchToOtherTxnManager))
                      .AddHashPartitions(2, "key");

        var table = await client.CreateTableAsync(builder);

        using var transaction = await client.NewTransactionAsync();

        // Cycle the leadership among masters, making sure the client successfully
        // switches to every newly elected leader master to send keepalive messages.
        int numMasters = harness.GetMasterServers().Count;

        for (int i = 0; i < numMasters; i++)
        {
            // Shutdown the leader master.
            var hostPort = await harness.KillLeaderMasterServerAsync();

            // Wait for two keepalive intervals to give the backend a chance
            // to automatically abort the transaction if not receiving txn keepalive
            // messages.
            await Task.Delay(2 * 1000);

            // The transaction should still be alive.
            var exception = await Assert.ThrowsAsync<NonRecoverableException>(
                async () => await transaction.WaitForCommitAsync());

            Assert.True(exception.Status.IsIllegalState);
            Assert.Equal("transaction is still open", exception.Message);

            // In addition, it should be possible to insert rows in the context
            // of the transaction.
            var insert = ClientTestUtil.CreateBasicSchemaInsert(table, i);
            await transaction.WriteAsync(new[] { insert });

            // Start the master back.
            await harness.StartMasterAsync(hostPort);
        }

        // Make sure the client properly processes error responses sent back by
        // TxnManager when the TxnStatusManager isn't available: shut down all
        // tablet servers to make sure the TxnStatusManager isn't there.
        await harness.KillAllTabletServersAsync();

        var startTabletServersTask = Task.Run(async () =>
        {
            // Sleep for some time to allow the commit call below to issue RPCs
            // while the TxnStatusManager is not yet around.
            await Task.Delay(2 * 1000);

            // Start all the tablet servers back so the TxnStatusManager is back.
            await harness.StartAllTabletServersAsync();
        });

        // The transaction should still be alive, and it should be possible to
        // commit it.
        await transaction.CommitAsync();

        await transaction.WaitForCommitAsync();

        await startTabletServersTask;

        // An extra sanity check: read back the rows written into the table in the
        // context of the transaction.
        var scanner = client.NewScanBuilder(table)
                      .SetReadMode(ReadMode.ReadYourWrites)
                      .SetReplicaSelection(ReplicaSelection.LeaderOnly)
                      .Build();

        Assert.Equal(numMasters, await scanner.CountAsync());
    }
Example #23
    public async Task TestSwitchToOtherTxnManager()
    {
        await using var harness = await new MiniKuduClusterBuilder()
                                  .AddMasterServerFlag("--txn_manager_enabled")
                                  // Set Raft heartbeat interval short for faster test runtime: speed up
                                  // leader failure detection and new leader election.
                                  .AddMasterServerFlag("--raft_heartbeat_interval_ms=100")
                                  .AddTabletServerFlag("--enable_txn_system_client_init=true")
                                  .BuildHarnessAsync();

        await using var client = harness.CreateClient();

        var builder = ClientTestUtil.GetBasicSchema()
                      .SetTableName(nameof(TestSwitchToOtherTxnManager))
                      .AddHashPartitions(2, "key");

        var table = await client.CreateTableAsync(builder);

        // Start a transaction, then restart every available TxnManager instance
        // before attempting any txn-related operation.
        using (var transaction = await client.NewTransactionAsync())
        {
            var insert = ClientTestUtil.CreateBasicSchemaInsert(table, 0);
            await transaction.WriteAsync(new[] { insert });

            await harness.KillAllMasterServersAsync();

            await harness.StartAllMasterServersAsync();

            // Querying the status of a transaction should be possible, as usual.
            // Since the transaction is still open, KuduTransaction.WaitForCommitAsync()
            // should throw a corresponding exception.
            var exception = await Assert.ThrowsAsync<NonRecoverableException>(
                async () => await transaction.WaitForCommitAsync());

            Assert.True(exception.Status.IsIllegalState);
            Assert.Equal("transaction is still open", exception.Message);

            await harness.KillAllMasterServersAsync();

            await harness.StartAllMasterServersAsync();

            // It should be possible to commit the transaction.
            await transaction.CommitAsync();

            await transaction.WaitForCommitAsync();

            // An extra sanity check: read back the rows written into the table in the
            // context of the transaction.
            var scanner = client.NewScanBuilder(table)
                          .SetReadMode(ReadMode.ReadYourWrites)
                          .SetReplicaSelection(ReplicaSelection.LeaderOnly)
                          .Build();

            Assert.Equal(1, await scanner.CountAsync());
        }

        // Similar to the above, but run KuduTransaction.CommitAsync() when only
        // 2 out of 3 masters are running and the TxnManager that started the
        // transaction is no longer around.
        using (var transaction = await client.NewTransactionAsync())
        {
            var insert = ClientTestUtil.CreateBasicSchemaInsert(table, 1);
            await transaction.WriteAsync(new[] { insert });

            await harness.KillLeaderMasterServerAsync();

            // It should be possible to commit the transaction: 2 out of 3 masters are
            // running and Raft should be able to establish a leader master. So,
            // txn-related operations routed through TxnManager should succeed.
            await transaction.CommitAsync();

            await transaction.WaitForCommitAsync();

            // An extra sanity check: read back the rows written into the table in the
            // context of the transaction.
            var scanner = client.NewScanBuilder(table)
                          .SetReadMode(ReadMode.ReadYourWrites)
                          .SetReplicaSelection(ReplicaSelection.LeaderOnly)
                          .Build();

            // 1 row should be there from earlier, plus the one we just committed.
            Assert.Equal(2, await scanner.CountAsync());
        }
    }