// [END bigtable_filters_limit_cells_per_col]
// [START bigtable_filters_limit_cells_per_row]
/// <summary>
/// Read using a cells per row filter from an existing table.
/// </summary>
/// <param name="projectId">Your Google Cloud Project ID.</param>
/// <param name="instanceId">Your Google Cloud Bigtable Instance ID.</param>
/// <param name="tableId">Your Google Cloud Bigtable table ID.</param>
public string filterLimitCellsPerRow(
    string projectId = "YOUR-PROJECT-ID",
    string instanceId = "YOUR-INSTANCE-ID",
    string tableId = "YOUR-TABLE-ID")
{
    // A filter that matches only the first 2 cells of each row.
    RowFilter filter = RowFilters.CellsPerRowLimit(2);
    return readFilter(projectId, instanceId, tableId, filter);
}
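The sample above hands the filter off to a readFilter helper that isn't included in this collection. Below is a minimal sketch of what such a helper could look like, assuming it streams every row through the given filter and returns the formatted output; the signature, output format, and assumed usings are guesses rather than the sample's actual implementation.

// Hypothetical helper; the readFilter method used by the sample above is not shown in this collection.
// Assumes: using System.Text; using Google.Cloud.Bigtable.Common.V2; using Google.Cloud.Bigtable.V2;
public string readFilter(string projectId, string instanceId, string tableId, RowFilter filter)
{
    BigtableClient bigtableClient = BigtableClient.Create();
    TableName tableName = new TableName(projectId, instanceId, tableId);

    // Stream the whole table, applying the filter server-side.
    ReadRowsStream response = bigtableClient.ReadRows(tableName, filter: filter);

    var result = new StringBuilder();
    response.ForEachAsync(row =>
    {
        result.AppendLine($"Row key: {row.Key.ToStringUtf8()}");
        foreach (Family family in row.Families)
        {
            foreach (Column column in family.Columns)
            {
                foreach (Cell cell in column.Cells)
                {
                    result.AppendLine(
                        $"\t{family.Name}:{column.Qualifier.ToStringUtf8()} " +
                        $"{cell.Value.ToStringUtf8()} @ {cell.TimestampMicros}");
                }
            }
        }
    }).Wait();
    return result.ToString();
}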
public void Chain()
{
    var filter = RowFilters.Chain(
        RowFilters.CellsPerRowLimit(1),
        RowFilters.ValueExact("abc"));
    Assert.NotNull(filter.Chain);
    Assert.Equal(2, filter.Chain.Filters.Count);
    Assert.Equal(RowFilters.CellsPerRowLimit(1), filter.Chain.Filters[0]);
    Assert.Equal(RowFilters.ValueExact("abc"), filter.Chain.Filters[1]);
}
public void Interleave()
{
    var filter = RowFilters.Interleave(
        RowFilters.CellsPerRowLimit(1),
        RowFilters.ValueExact("abc"));
    Assert.NotNull(filter.Interleave);
    Assert.Equal(2, filter.Interleave.Filters.Count);
    Assert.Equal(RowFilters.CellsPerRowLimit(1), filter.Interleave.Filters[0]);
    Assert.Equal(RowFilters.ValueExact("abc"), filter.Interleave.Filters[1]);
}
public void TestRowFilter()
{
    var rowFilter = RowFilters.Chain(
        RowFilters.CellsPerColumnLimit(1),
        RowFilters.CellsPerRowOffset(2),
        RowFilters.CellsPerRowLimit(10));
    ReadRowsRequest originalRequest = CreateRowFilterRequest(rowFilter);

    BigtableReadRowsRequestManager underTest = new BigtableReadRowsRequestManager(originalRequest);
    Assert.Equal(originalRequest, underTest.BuildUpdatedRequest());
}
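CreateRowFilterRequest isn't reproduced in this snippet. Judging from the second TestRowFilter variant further down, which builds the same request inline, the helper presumably looks roughly like the following sketch; the "a" row key is copied from that inline version and everything else is an assumption.

// Hypothetical helper mirroring the inline request construction in the other TestRowFilter variant.
private static ReadRowsRequest CreateRowFilterRequest(RowFilter rowFilter) =>
    new ReadRowsRequest
    {
        Rows = RowSet.FromRowKey("a"),
        Filter = rowFilter
    };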
public override async Task<long> ReadHighestSequenceNrAsync(string persistenceId, long fromSequenceNr)
{
    var rowRange = RowRange.ClosedOpen(
        ToRowKeyBigtableByteString(persistenceId, fromSequenceNr),
        ToRowKeyBigtableByteString(persistenceId, long.MaxValue));
    var rows = RowSet.FromRowRanges(rowRange);

    var stream = _bigtableClient.ReadRows(
        _tableName,
        rows: rows,
        filter: RowFilters.Chain(RowFilters.CellsPerRowLimit(1), RowFilters.StripValueTransformer()));

    var lastRow = await stream.LastOrDefault().ConfigureAwait(false);
    return lastRow == null ? 0L : GetSequenceNumber(lastRow);
}
public void TestRowFilter()
{
    var rowFilter = RowFilters.Chain(
        RowFilters.CellsPerColumnLimit(1),
        RowFilters.CellsPerRowOffset(2),
        RowFilters.CellsPerRowLimit(10));
    var originalRequest = new ReadRowsRequest
    {
        Rows = RowSet.FromRowKey("a"),
        Filter = rowFilter
    };

    BigtableReadRowsRequestManager underTest = new BigtableReadRowsRequestManager(originalRequest);
    Assert.Equal(originalRequest, underTest.BuildUpdatedRequest());
}
public static int Main(string[] args)
{
    // Your Google Cloud Platform project ID
    const string projectId = "YOUR-PROJECT-ID";
    // The name of the Cloud Bigtable instance
    const string instanceId = "YOUR-INSTANCE-ID";
    // [END bigtable_quickstart]

    if (projectId == "YOUR-PROJECT" + "-ID")
    {
        Console.WriteLine("Edit QuickStart.cs and replace YOUR-PROJECT-ID with your project id.");
        return -1;
    }
    if (instanceId == "YOUR-INSTANCE" + "-ID")
    {
        Console.WriteLine("Edit QuickStart.cs and replace YOUR-INSTANCE-ID with your instance id.");
        return -1;
    }

    // [START bigtable_quickstart]
    // The name of the Cloud Bigtable table
    const string tableId = "my-table";

    try
    {
        // Creates a Bigtable client
        BigtableClient bigtableClient = BigtableClient.Create();

        // Read a row from my-table using a row key
        Row row = bigtableClient.ReadRow(
            new TableName(projectId, instanceId, tableId), "r1", RowFilters.CellsPerRowLimit(1));

        // Print the row key and data (column value, labels, timestamp)
        Console.WriteLine($"{"Row key:",-30}{row.Key.ToStringUtf8()}\n" +
                          $"{" Column Family:",-30}{row.Families[0].Name}\n" +
                          $"{" Column Qualifier:",-30}{row.Families[0].Columns[0].Qualifier.ToStringUtf8()}\n" +
                          $"{" Value:",-30}{row.Families[0].Columns[0].Cells[0].Value.ToStringUtf8()}\n" +
                          $"{" Labels:",-30}{row.Families[0].Columns[0].Cells[0].Labels}\n" +
                          $"{" Timestamp:",-30}{row.Families[0].Columns[0].Cells[0].TimestampMicros}\n");
    }
    catch (Exception ex)
    {
        // Handle error performing the read operation
        Console.WriteLine($"Error reading row r1: {ex.Message}");
    }
    return 0;
}
private static void DoHelloWorld()
{
    try
    {
        // [START bigtable_hw_connect]
        // BigtableTableAdminClient API lets us create, manage and delete tables.
        BigtableTableAdminClient bigtableTableAdminClient = BigtableTableAdminClient.Create();

        // BigtableClient API lets us read and write to a table.
        BigtableClient bigtableClient = BigtableClient.Create();
        // [END bigtable_hw_connect]

        // [START bigtable_hw_create_table]
        // Create a table with a single column family.
        Console.WriteLine($"Create new table: {tableId} with column family: {columnFamily}, instance: {instanceId}");

        // Check whether a table with the given TableName already exists.
        if (!TableExist(bigtableTableAdminClient))
        {
            bigtableTableAdminClient.CreateTable(
                new InstanceName(projectId, instanceId),
                tableId,
                new Table
                {
                    Granularity = Table.Types.TimestampGranularity.Millis,
                    ColumnFamilies =
                    {
                        { columnFamily, new ColumnFamily { GcRule = new GcRule { MaxNumVersions = 1 } } }
                    }
                });
            // Confirm that the table was created successfully.
            Console.WriteLine(TableExist(bigtableTableAdminClient)
                ? $"Table {tableId} created successfully\n"
                : $"There was a problem creating the table {tableId}");
        }
        else
        {
            Console.WriteLine($"Table: {tableId} already exists");
        }
        // [END bigtable_hw_create_table]

        // [START bigtable_hw_write_rows]
        // Initialize a Google.Cloud.Bigtable.Common.V2.TableName object.
        Google.Cloud.Bigtable.Common.V2.TableName tableName =
            new Google.Cloud.Bigtable.Common.V2.TableName(projectId, instanceId, tableId);

        // Write some rows.
        /* Each row has a unique row key.
         *
         * Note: This example uses sequential numeric IDs for simplicity, but
         * this can result in poor performance in a production application.
         * Since rows are stored in sorted order by key, sequential keys can
         * result in poor distribution of operations across nodes.
         *
         * For more information about how to design a Bigtable schema for the
         * best performance, see the documentation:
         *
         * https://cloud.google.com/bigtable/docs/schema-design
         */
        Console.WriteLine($"Write some greetings to the table {tableId}");

        // Insert 1 row using MutateRow().
        s_greetingIndex = 0;
        try
        {
            bigtableClient.MutateRow(tableName, rowKeyPrefix + s_greetingIndex, MutationBuilder());
            Console.WriteLine($"\tGreeting: -- {s_greetings[s_greetingIndex],-18}-- written successfully");
        }
        catch (Exception ex)
        {
            Console.WriteLine($"\tFailed to write greeting: --{s_greetings[s_greetingIndex]}");
            Console.WriteLine(ex.Message);
            throw;
        }

        // Insert multiple rows using MutateRows().
        // Build a MutateRowsRequest (contains the table name and a collection of entries).
        MutateRowsRequest request = new MutateRowsRequest
        {
            TableNameAsTableName = tableName
        };

        s_mapToOriginalGreetingIndex = new List<int>();
        while (++s_greetingIndex < s_greetings.Length)
        {
            s_mapToOriginalGreetingIndex.Add(s_greetingIndex);
            // Build an entry for every greeting (consists of a row key and a collection of mutations).
            string rowKey = rowKeyPrefix + s_greetingIndex;
            request.Entries.Add(Mutations.CreateEntry(rowKey, MutationBuilder()));
        }

        // Make the request to write multiple rows.
        MutateRowsResponse response = bigtableClient.MutateRows(request);

        // Check the status code of each entry to ensure that it was written successfully.
        foreach (MutateRowsResponse.Types.Entry entry in response.Entries)
        {
            s_greetingIndex = s_mapToOriginalGreetingIndex[(int)entry.Index];
            if (entry.Status.Code == 0)
            {
                Console.WriteLine($"\tGreeting: -- {s_greetings[s_greetingIndex],-18}-- written successfully");
            }
            else
            {
                Console.WriteLine($"\tFailed to write greeting: --{s_greetings[s_greetingIndex]}");
                Console.WriteLine(entry.Status.Message);
            }
        }

        Mutation MutationBuilder() =>
            Mutations.SetCell(columnFamily, columnName, s_greetings[s_greetingIndex], new BigtableVersion(DateTime.UtcNow));
        // [END bigtable_hw_write_rows]

        // [START bigtable_hw_create_filter]
        RowFilter filter = RowFilters.CellsPerRowLimit(1);
        // [END bigtable_hw_create_filter]

        // [START bigtable_hw_get_with_filter]
        // Read from the table.
        Console.WriteLine("Read the first row");

        int rowIndex = 0;

        // Read a specific row. Apply a filter to return only the latest cell value across the entire row.
        Row rowRead = bigtableClient.ReadRow(
            tableName, rowKey: rowKeyPrefix + rowIndex, filter: filter);
        Console.WriteLine(
            $"\tRow key: {rowRead.Key.ToStringUtf8()} " +
            $"  -- Value: {rowRead.Families[0].Columns[0].Cells[0].Value.ToStringUtf8(),-16} " +
            $"  -- Time Stamp: {rowRead.Families[0].Columns[0].Cells[0].TimestampMicros}");
        // [END bigtable_hw_get_with_filter]

        // [START bigtable_hw_scan_with_filter]
        Console.WriteLine("Read all rows using streaming");
        // Stream the content of the whole table. Apply a filter to return only the latest cell value across all rows.
        ReadRowsStream responseRead = bigtableClient.ReadRows(tableName, filter: filter);

        Task printRead = PrintReadRowsAsync();
        printRead.Wait();

        async Task PrintReadRowsAsync()
        {
            await responseRead.ForEachAsync(row =>
            {
                Console.WriteLine($"\tRow key: {row.Key.ToStringUtf8()} " +
                                  $"  -- Value: {row.Families[0].Columns[0].Cells[0].Value.ToStringUtf8(),-16} " +
                                  $"  -- Time Stamp: {row.Families[0].Columns[0].Cells[0].TimestampMicros}");
            });
        }
        // [END bigtable_hw_scan_with_filter]

        // [START bigtable_hw_delete_table]
        // Clean up. Delete the table.
        Console.WriteLine($"Delete table: {tableId}");

        bigtableTableAdminClient.DeleteTable(name: tableName);
        if (!TableExist(bigtableTableAdminClient))
        {
            Console.WriteLine($"Table: {tableId} deleted successfully");
        }
        // [END bigtable_hw_delete_table]
    }
    catch (Exception ex)
    {
        Console.WriteLine($"Exception while running HelloWorld: {ex.Message}");
    }
}
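Both the create-table and delete-table steps above rely on a TableExist helper that isn't shown here. One plausible sketch follows, assuming it simply lists the instance's tables and looks for the sample table by name; the ListTables-based approach and the assumed usings are guesses, not necessarily how the original sample implements the check.

// Hypothetical helper; the TableExist method used by DoHelloWorld is not shown in this collection.
// Assumes: using System.Linq; using Google.Cloud.Bigtable.Admin.V2;
private static bool TableExist(BigtableTableAdminClient bigtableTableAdminClient)
{
    // List the tables in the instance and check whether one of them ends with the sample table ID.
    var tables = bigtableTableAdminClient.ListTables(new InstanceName(projectId, instanceId));
    return tables.Any(t => t.Name.EndsWith($"/tables/{tableId}"));
}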
public void CellsPerRowLimit()
{
    var filter = RowFilters.CellsPerRowLimit(2);
    Assert.Equal(2, filter.CellsPerRowLimitFilter);
}