// Console host entry point: creates the database schema, wires up a Windsor
// container (WCF + factory facilities, NHibernate session infrastructure,
// product repository/model service on port 9595), then blocks until a key
// is pressed.
static void Main(string[] args)
{
    // Log otherwise-fatal exceptions from any thread.
    AppDomain.CurrentDomain.UnhandledException += CurrentDomain_UnhandledException;

    var schemaCreator = new SchemaCreator();
    schemaCreator.CreateSchema();

    var container = new WindsorContainer()
        .AddFacility<WcfFacility>()
        .AddFacility<FactorySupportFacility>()
        .Register(SessionEndPoint)
        .Register(Component.For<IProductRepository>().ImplementedBy<ProductRepository>())
        .Register(Service<IProductModel, ProductModel>(9595));

    // NHibernate session factory plumbing; the factory instance is registered
    // eagerly so resolution does not build a new one per request.
    var nhConfigurator = new DefaultSessionFactoryConfigurationProvider();
    var sfp = new SessionFactoryProvider(nhConfigurator);
    container.Register(Component.For<ISessionFactoryProvider>().Instance(sfp));
    container.Register(Component.For<ISessionWrapper>().ImplementedBy<SessionWrapper>());
    container.Register(Component.For<ISessionFactory>().Instance(sfp.GetFactory(null)));

    IoC.RegisterResolver(new WindsorDependencyResolver(container));
    CurrentSessionContext.Wrapper = container.Resolve<ISessionWrapper>();

    Console.WriteLine("Server started");
    Console.ReadLine();

    // NOTE(review): CreateSchema is invoked a second time on shutdown; this
    // looks like a leftover/copy-paste — confirm it is intentional (e.g.
    // resetting a demo database after the server stops).
    schemaCreator.CreateSchema();
}
// Schema migration (5.3): rewrites every row of the "tasks" table so the
// task_type value is stored as ASCII instead of Unicode, then bumps the
// schema version.
public void Update(Session session, JET_DBID dbid, Action<string> output)
{
    using (var tbl = new Table(session, dbid, "tasks", OpenTableGrbit.None))
    {
        int rows = 0;
        if (Api.TryMoveFirst(session, tbl))
        {
            var taskTypeColumnId = Api.GetTableColumnid(session, tbl, "task_type");
            do
            {
                using (var update = new Update(session, tbl, JET_prep.Replace))
                {
                    // Read as Unicode, write back as ASCII.
                    var taskType = Api.RetrieveColumnAsString(session, tbl, taskTypeColumnId, Encoding.Unicode);
                    Api.SetColumn(session, tbl, taskTypeColumnId, taskType, Encoding.ASCII);
                    update.Save();
                }
                // Pulse the surrounding transaction every 10,000 rows to keep
                // the Esent version store small (also fires on the first row).
                if (rows++ % 10000 == 0)
                {
                    output("Processed " + (rows) + " rows in tasks");
                    Api.JetCommitTransaction(session, CommitTransactionGrbit.LazyFlush);
                    Api.JetBeginTransaction2(session, BeginTransactionGrbit.None);
                }
            } while (Api.TryMoveNext(session, tbl));
        }
        SchemaCreator.UpdateVersion(session, dbid, "5.3");
    }
}
// Schema migration (4.6): adds three columns to "indexes_stats" —
// priority (int, default 1), created_timestamp and last_indexing_time
// (both fixed 8-byte binary, default taken from a 4-byte int 0).
public void Update(Session session, JET_DBID dbid, Action<string> output)
{
    using (var table = new Table(session, dbid, "indexes_stats", OpenTableGrbit.None))
    {
        byte[] defaultValue = BitConverter.GetBytes(1);
        JET_COLUMNID columnid;
        Api.JetAddColumn(session, table, "priority", new JET_COLUMNDEF
        {
            coltyp = JET_coltyp.Long,
            grbit = ColumndefGrbit.ColumnFixed | ColumndefGrbit.ColumnNotNULL
        }, defaultValue, defaultValue.Length, out columnid);

        defaultValue = BitConverter.GetBytes(0);
        // NOTE(review): this default is a 4-byte int while the columns below
        // are declared as 8-byte binary ("64 bits") — confirm Esent accepts
        // the shorter default as intended.
        Api.JetAddColumn(session, table, "created_timestamp", new JET_COLUMNDEF
        {
            cbMax = 8, //64 bits
            coltyp = JET_coltyp.Binary,
            grbit = ColumndefGrbit.ColumnFixed | ColumndefGrbit.ColumnNotNULL
        }, defaultValue, defaultValue.Length, out columnid);

        Api.JetAddColumn(session, table, "last_indexing_time", new JET_COLUMNDEF
        {
            cbMax = 8, //64 bits
            coltyp = JET_coltyp.Binary,
            grbit = ColumndefGrbit.ColumnFixed | ColumndefGrbit.ColumnNotNULL
        }, defaultValue, defaultValue.Length, out columnid);
    }
    SchemaCreator.UpdateVersion(session, dbid, "4.6");
}
// Initializes the file storage: wires up codecs and the UUID generator,
// opens the table storage, creates/updates the schema, and finally writes
// the resource marker (when a callback was supplied).
public void Initialize(UuidGenerator generator, OrderedPartCollection<AbstractFileCodec> codecs, Action<string> putResourceMarker = null)
{
    if (codecs == null)
    {
        throw new ArgumentNullException("codecs");
    }

    fileCodecs = codecs;
    uuidGenerator = generator;

    var storageOptions = CreateStorageOptionsFromConfiguration(path, configuration);
    tableStorage = new TableStorage(storageOptions, bufferPool);

    var creator = new SchemaCreator(configuration, tableStorage, Output, Log);
    creator.CreateSchema();
    creator.SetupDatabaseIdAndSchemaVersion();
    creator.UpdateSchemaIfNecessary();

    SetupDatabaseId();
    idGenerator = new IdGenerator(tableStorage);

    // Marker is optional — only written when the caller asked for it.
    putResourceMarker?.Invoke(path);
}
// Initializes the file storage, honoring the Raven/RunInMemory setting:
// in-memory storage for tests, on-disk storage otherwise.
public void Initialize(UuidGenerator generator, OrderedPartCollection<AbstractFileCodec> codecs)
{
    if (codecs == null)
    {
        throw new ArgumentNullException("codecs");
    }

    fileCodecs = codecs;
    uuidGenerator = generator;

    // Unparseable/missing setting yields false (on-disk storage).
    bool runInMemory;
    bool.TryParse(settings[Constants.RunInMemory], out runInMemory);

    StorageEnvironmentOptions storageOptions;
    if (runInMemory)
    {
        storageOptions = StorageEnvironmentOptions.CreateMemoryOnly();
    }
    else
    {
        storageOptions = CreateStorageOptionsFromConfiguration(path, settings);
    }

    tableStorage = new TableStorage(storageOptions, bufferPool);

    var creator = new SchemaCreator(configuration, tableStorage, Output, Log);
    creator.CreateSchema();
    creator.SetupDatabaseIdAndSchemaVersion();
    creator.UpdateSchemaIfNecessary();

    SetupDatabaseId();
    idGenerator = new IdGenerator(tableStorage);
}
// Schema migration (5.1): adds a nullable "created_at" DateTime column to the
// "lists" table, stamps every existing row with the current UTC time, and
// creates the (name, created_at) index.
public void Update(Session session, JET_DBID dbid, Action<string> output)
{
    using (var tbl = new Table(session, dbid, "lists", OpenTableGrbit.None))
    {
        JET_COLUMNID createdAt;
        Api.JetAddColumn(session, tbl, "created_at", new JET_COLUMNDEF
        {
            coltyp = JET_coltyp.DateTime,
            grbit = ColumndefGrbit.ColumnMaybeNull,
        }, null, 0, out createdAt);

        if (Api.TryMoveFirst(session, tbl))
        {
            do
            {
                using (var update = new Update(session, tbl, JET_prep.Replace))
                {
                    // FIX: the column id is now taken from JetAddColumn instead of
                    // calling GetTableColumnid once per row — the lookup is
                    // loop-invariant.
                    Api.SetColumn(session, tbl, createdAt, SystemTime.UtcNow);
                    update.Save();
                }
            } while (Api.TryMoveNext(session, tbl));
        }

        SchemaCreator.CreateIndexes(session, tbl, new JET_INDEXCREATE
        {
            szIndexName = "by_name_and_created_at",
            szKey = "+name\0+created_at\0\0",
            grbit = CreateIndexGrbit.IndexDisallowNull
        });
        SchemaCreator.UpdateVersion(session, dbid, "5.1");
    }
}
// Initializes the document storage: validates inputs, picks in-memory vs
// on-disk Voron options from configuration, then creates/updates the schema.
public void Initialize(IUuidGenerator generator, OrderedPartCollection<AbstractDocumentCodec> documentCodecs)
{
    if (generator == null)
    {
        throw new ArgumentNullException("generator");
    }
    if (documentCodecs == null)
    {
        throw new ArgumentNullException("documentCodecs");
    }

    uuidGenerator = generator;
    _documentCodecs = documentCodecs;

    StorageEnvironmentOptions options;
    if (configuration.RunInMemory)
    {
        options = CreateMemoryStorageOptionsFromConfiguration(configuration);
    }
    else
    {
        options = CreateStorageOptionsFromConfiguration(configuration);
    }

    tableStorage = new TableStorage(options, bufferPool);

    var creator = new SchemaCreator(configuration, tableStorage, Output, Log);
    creator.CreateSchema();
    creator.SetupDatabaseIdAndSchemaVersion();
    creator.UpdateSchemaIfNecessary();

    SetupDatabaseId();
}
// Reads the extensible-storage entity attached to a Revit element and
// converts it into the typed wrapper TRevitEntity. Returns null when the
// schema is not registered or the element carries no valid entity.
public static TRevitEntity GetEntity<TRevitEntity>(this Element element) where TRevitEntity : class, IRevitEntity
{
    // Resolve the schema GUID from the [Schema] attribute on the entity type.
    var attributeExtractor = new AttributeExtractor<SchemaAttribute>();
    var schemaAttribute = attributeExtractor.GetAttribute(typeof(TRevitEntity));

    var schema = Schema.Lookup(schemaAttribute.GUID);
    if (schema == null)
    {
        return null;
    }

    var rawEntity = element.GetEntity(schema);
    if (rawEntity == null || !rawEntity.IsValid())
    {
        return null;
    }

    // Convert the raw extensible-storage entity into the typed wrapper.
    IEntityConverter entityConverter = new EntityConverter(new SchemaCreator());
    return entityConverter.Convert<TRevitEntity>(rawEntity);
}
// Drops every Voron tree that holds indexing state (stats, tasks, map/reduce
// bookkeeping and their secondary indexes), recreates them empty via the
// SchemaCreator helpers, and clears the related system lists. All tree work
// happens in a single read-write transaction.
public void DropAllIndexingInformation()
{
    Batch(accessor =>
    {
        var schemaCreator = new SchemaCreator(configuration, tableStorage, Output, Log);
        var storage = schemaCreator.storage;
        using (var tx = storage.Environment.NewTransaction(TransactionFlags.ReadWrite))
        {
            //deleting index related trees
            storage.Environment.DeleteTree(tx, Tables.IndexingStats.TableName);
            storage.Environment.DeleteTree(tx, Tables.LastIndexedEtags.TableName);

            storage.Environment.DeleteTree(tx, Tables.DocumentReferences.TableName);
            storage.Environment.DeleteTree(tx, storage.DocumentReferences.GetIndexKey(Tables.DocumentReferences.Indices.ByRef));
            storage.Environment.DeleteTree(tx, storage.DocumentReferences.GetIndexKey(Tables.DocumentReferences.Indices.ByView));
            storage.Environment.DeleteTree(tx, storage.DocumentReferences.GetIndexKey(Tables.DocumentReferences.Indices.ByViewAndKey));
            storage.Environment.DeleteTree(tx, storage.DocumentReferences.GetIndexKey(Tables.DocumentReferences.Indices.ByKey));

            storage.Environment.DeleteTree(tx, Tables.Tasks.TableName);
            storage.Environment.DeleteTree(tx, storage.Tasks.GetIndexKey(Tables.Tasks.Indices.ByIndexAndType));
            storage.Environment.DeleteTree(tx, storage.Tasks.GetIndexKey(Tables.Tasks.Indices.ByType));
            storage.Environment.DeleteTree(tx, storage.Tasks.GetIndexKey(Tables.Tasks.Indices.ByIndex));

            storage.Environment.DeleteTree(tx, Tables.ScheduledReductions.TableName);
            storage.Environment.DeleteTree(tx, storage.ScheduledReductions.GetIndexKey(Tables.ScheduledReductions.Indices.ByView));
            storage.Environment.DeleteTree(tx, storage.ScheduledReductions.GetIndexKey(Tables.ScheduledReductions.Indices.ByViewAndLevelAndReduceKey));

            storage.Environment.DeleteTree(tx, Tables.MappedResults.TableName);
            storage.Environment.DeleteTree(tx, storage.MappedResults.GetIndexKey(Tables.MappedResults.Indices.ByView));
            storage.Environment.DeleteTree(tx, storage.MappedResults.GetIndexKey(Tables.MappedResults.Indices.ByViewAndDocumentId));
            storage.Environment.DeleteTree(tx, storage.MappedResults.GetIndexKey(Tables.MappedResults.Indices.ByViewAndReduceKey));
            storage.Environment.DeleteTree(tx, storage.MappedResults.GetIndexKey(Tables.MappedResults.Indices.ByViewAndReduceKeyAndSourceBucket));
            storage.Environment.DeleteTree(tx, storage.MappedResults.GetIndexKey(Tables.MappedResults.Indices.Data));

            storage.Environment.DeleteTree(tx, Tables.ReduceKeyCounts.TableName);
            storage.Environment.DeleteTree(tx, storage.ReduceKeyCounts.GetIndexKey(Tables.ReduceKeyCounts.Indices.ByView));

            storage.Environment.DeleteTree(tx, Tables.ReduceKeyTypes.TableName);
            // NOTE(review): this uses ReduceKeyCounts.Indices.ByView for the
            // ReduceKeyTypes table — possibly a copy-paste slip; confirm the
            // two tables share the same index key constant.
            storage.Environment.DeleteTree(tx, storage.ReduceKeyTypes.GetIndexKey(Tables.ReduceKeyCounts.Indices.ByView));

            storage.Environment.DeleteTree(tx, Tables.ReduceResults.TableName);
            storage.Environment.DeleteTree(tx, storage.ReduceResults.GetIndexKey(Tables.ReduceResults.Indices.ByViewAndReduceKeyAndLevel));
            storage.Environment.DeleteTree(tx, storage.ReduceResults.GetIndexKey(Tables.ReduceResults.Indices.ByViewAndReduceKeyAndLevelAndSourceBucket));
            storage.Environment.DeleteTree(tx, storage.ReduceResults.GetIndexKey(Tables.ReduceResults.Indices.ByViewAndReduceKeyAndLevelAndBucket));
            storage.Environment.DeleteTree(tx, storage.ReduceResults.GetIndexKey(Tables.ReduceResults.Indices.ByView));
            storage.Environment.DeleteTree(tx, storage.ReduceResults.GetIndexKey(Tables.ReduceResults.Indices.Data));

            storage.Environment.DeleteTree(tx, Tables.ReduceStats.TableName);
            storage.Environment.DeleteTree(tx, Tables.IndexingMetadata.TableName);

            //creating the new empty indexes trees
            SchemaCreator.CreateIndexingStatsSchema(tx, storage);
            SchemaCreator.CreateLastIndexedEtagsSchema(tx, storage);
            SchemaCreator.CreateDocumentReferencesSchema(tx, storage);
            SchemaCreator.CreateTasksSchema(tx, storage);
            SchemaCreator.CreateScheduledReductionsSchema(tx, storage);
            SchemaCreator.CreateMappedResultsSchema(tx, storage);
            SchemaCreator.CreateReduceKeyCountsSchema(tx, storage);
            SchemaCreator.CreateReduceKeyTypesSchema(tx, storage);
            SchemaCreator.CreateReduceResultsSchema(tx, storage);
            SchemaCreator.CreateReduceStatsSchema(tx, storage);
            SchemaCreator.CreateIndexingMetadataSchema(tx, storage);

            tx.Commit();
        }

        // Purge the query-time and pending-deletion bookkeeping lists entirely.
        accessor.Lists.RemoveAllOlderThan("Raven/Indexes/QueryTime", DateTime.MinValue);
        accessor.Lists.RemoveAllOlderThan("Raven/Indexes/PendingDeletion", DateTime.MinValue);
    });
}
// Schema migration (0.5): rewrites the "config" table metadata column by
// adding metadata_new (DDL pass), copying each row through a Guid→Etag
// migration, then dropping the old column and renaming the new one.
public void Update(Session session, JET_DBID dbid, Action<string> output)
{
    // DDL pass: add the replacement column while holding DenyRead + PermitDDL.
    using (var table = new Table(session, dbid, "config", OpenTableGrbit.DenyRead | OpenTableGrbit.PermitDDL))
    {
        JET_COLUMNID newMetadataColumnId;
        Api.JetAddColumn(session, table, "metadata_new", new JET_COLUMNDEF
        {
            cbMax = 1024 * 512,
            coltyp = JET_coltyp.LongText,
            cp = JET_CP.Unicode,
            grbit = ColumndefGrbit.ColumnNotNULL
        }, null, 0, out newMetadataColumnId);
    }

    // Data pass: copy each row's metadata through the migration helper.
    using (var table = new Table(session, dbid, "config", OpenTableGrbit.None))
    {
        Api.MoveBeforeFirst(session, table);
        var rows = 0;
        var columnDictionary = Api.GetColumnDictionary(session, table);
        var metadataColumn = columnDictionary["metadata"];
        var nameColumn = columnDictionary["name"];
        var newMetadataColumn = columnDictionary["metadata_new"];
        while (Api.TryMoveNext(session, table))
        {
            // Note: despite the name, this is a Replace update of the current row.
            using (var insert = new Update(session, table, JET_prep.Replace))
            {
                var name = Api.RetrieveColumnAsString(session, table, nameColumn, Encoding.Unicode);
                var metadata = Api.RetrieveColumnAsString(session, table, metadataColumn, Encoding.Unicode);
                var fixedMetadata = GuidToEtagMigrationInConfigurations(metadata, name);
                Api.SetColumn(session, table, newMetadataColumn, fixedMetadata, Encoding.Unicode);
                insert.Save();
            }
            // Pulse the transaction every 100 rows to bound the version store.
            if (rows++ % 100 == 0)
            {
                output("Processed " + (rows) + " rows from metadata column in config table");
                Api.JetCommitTransaction(session, CommitTransactionGrbit.LazyFlush);
                Api.JetBeginTransaction2(session, BeginTransactionGrbit.None);
            }
        }
        Api.JetCommitTransaction(session, CommitTransactionGrbit.None); // they cannot be run in transaction scope
        Api.JetDeleteColumn(session, table, "metadata");
        Api.JetRenameColumn(session, table, "metadata_new", "metadata", RenameColumnGrbit.None);
        // Re-open a transaction so the caller's surrounding commit still balances.
        Api.JetBeginTransaction2(session, BeginTransactionGrbit.None);
    }
    SchemaCreator.UpdateVersion(session, dbid, "0.5");
}
// Converts the typed wrapper into a raw extensible-storage entity and
// attaches it to the Revit element.
public static void SetEntity(this Element element, IRevitEntity revitEntity)
{
    IEntityConverter entityConverter = new EntityConverter(new SchemaCreator());
    var entity = entityConverter.Convert(revitEntity);
    element.SetEntity(entity);
}
// Schema migration (3.8): drops the obsolete "mapped_results" table and
// recreates the map/reduce bookkeeping tables from scratch via the sibling
// Create*Table helpers — existing data is intentionally discarded.
public void Update(Session session, JET_DBID dbid, Action<string> output)
{
    Api.JetDeleteTable(session, dbid, "mapped_results"); // just kill the old table, we won't use the data anyway
    CreateMapResultsTable(session, dbid);
    CreateReduceResultsTable(session, dbid);
    CreateScheduledReductionsTable(session, dbid);
    SchemaCreator.UpdateVersion(session, dbid, "3.8");
}
// Schema migration (4.2): gives reduce_keys_status.reduce_type an explicit
// default value of 0 (a 4-byte little-endian int).
public void Update(Session session, JET_DBID dbid, Action<string> output)
{
    var defaultVal = BitConverter.GetBytes(0);
    Api.JetSetColumnDefaultValue(session, dbid, "reduce_keys_status", "reduce_type", defaultVal, defaultVal.Length, SetColumnDefaultValueGrbit.None);
    SchemaCreator.UpdateVersion(session, dbid, "4.2");
}
// "Load schema" click handler: stores the selected server instance and
// database name on the application object, builds the schema tree, and
// expands the schema panel.
private void NgarkoSkemen_Click(object sender, RoutedEventArgs e)
{
    var selectedInstance = ((ComboBoxItem)MyIntances.SelectedItem).Content.ToString();
    var selectedDatabase = ((ComboBoxItem)DatabaseCombo.SelectedItem).Content.ToString();

    var app = (App)App.Current;
    app.Server = selectedInstance;
    app.DatabaseName = selectedDatabase;

    SchemaCreator.CreateTree(AllEntities, ChildrenEnities);
    SchemaExpander.IsExpanded = true;
}
// Creates the database schema over the connection's native NHibernate
// session. Throws when the connection does not expose a native implementation.
void CreateSchema(IDataConnection c, SchemaCreator schemaCreator)
{
    if (c is IHasNativeImplementation nativeSession)
    {
        var session = (ISession)nativeSession.NativeImplementation;
        schemaCreator.CreateSchema(session.Connection);
        return;
    }

    throw new ArgumentException($"{nameof(IDataConnection)} must provide an NHibernate {nameof(ISession)}.");
}
// Initializes the Voron document storage: validates inputs, picks
// in-memory vs on-disk options, registers the scratch-buffer-growth
// notification, creates/updates the schema (unless updates are prevented
// by configuration), and writes the optional resource marker.
public void Initialize(IUuidGenerator generator, OrderedPartCollection<AbstractDocumentCodec> documentCodecs, Action<string> putResourceMarker = null)
{
    if (generator == null)
    {
        throw new ArgumentNullException("generator");
    }
    if (documentCodecs == null)
    {
        throw new ArgumentNullException("documentCodecs");
    }

    uuidGenerator = generator;
    _documentCodecs = documentCodecs;

    Log.Info("Starting to initialize Voron storage. Path: " + configuration.DataDirectory);

    StorageEnvironmentOptions options;
    if (configuration.RunInMemory)
    {
        options = CreateMemoryStorageOptionsFromConfiguration(configuration);
    }
    else
    {
        options = CreateStorageOptionsFromConfiguration(configuration);
    }

    // Fire the notification handlers once the scratch buffer grows past the
    // configured threshold; a negative threshold disables the notification.
    options.OnScratchBufferSizeChanged += size =>
    {
        var threshold = configuration.Storage.Voron.ScratchBufferSizeNotificationThreshold;
        if (threshold >= 0 && size >= threshold * 1024L * 1024L)
        {
            RunTransactionalStorageNotificationHandlers();
        }
    };

    tableStorage = new TableStorage(options, bufferPool);

    var creator = new SchemaCreator(configuration, tableStorage, Output, Log);
    creator.CreateSchema();
    creator.SetupDatabaseIdAndSchemaVersion();
    if (!configuration.Storage.PreventSchemaUpdate)
    {
        creator.UpdateSchemaIfNecessary();
    }

    SetupDatabaseId();

    putResourceMarker?.Invoke(configuration.DataDirectory);

    Log.Info("Voron storage initialized");
}
// Schema migration (0.4): widens the "pages" data column by copying it into
// a larger data_new column (4x max page size to allow for codec expansion),
// then dropping the original and renaming data_new back to "data".
public void Update(Session session, JET_DBID dbid, Action<string> output)
{
    JET_COLUMNID newDataColumnId;
    try
    {
        using (var table = new Table(session, dbid, "pages", OpenTableGrbit.None))
        {
            Api.JetAddColumn(session, table, "data_new", new JET_COLUMNDEF
            {
                cbMax = 4 * StorageConstants.MaxPageSize, // handle possible data expansion because of codecs usage
                coltyp = JET_coltyp.LongBinary,
                grbit = ColumndefGrbit.ColumnMaybeNull
            }, null, 0, out newDataColumnId);

            Api.MoveBeforeFirst(session, table);
            var dataColumnId = Api.GetTableColumnid(session, table, "data");
            var rows = 0;
            while (Api.TryMoveNext(session, table))
            {
                // Note: despite the name, this is a Replace of the current row.
                using (var insert = new Update(session, table, JET_prep.Replace))
                {
                    var value = Api.RetrieveColumn(session, table, dataColumnId);
                    Api.SetColumn(session, table, newDataColumnId, value);
                    insert.Save();
                }
                // Pulse the transaction every 1,000 rows to bound the version store.
                if (rows++ % 1000 == 0)
                {
                    output("Processed " + (rows) + " rows from data column in pages table");
                    Api.JetCommitTransaction(session, CommitTransactionGrbit.LazyFlush);
                    Api.JetBeginTransaction2(session, BeginTransactionGrbit.None);
                }
            }
            // Column delete/rename cannot run inside a transaction scope.
            Api.JetCommitTransaction(session, CommitTransactionGrbit.None);
            Api.JetDeleteColumn(session, table, "data");
            Api.JetRenameColumn(session, table, "data_new", "data", RenameColumnGrbit.None);
            Api.JetBeginTransaction2(session, BeginTransactionGrbit.None);
        }
    }
    catch (Exception e)
    {
        // NOTE(review): failures are only written to the console and the
        // version is still bumped below — confirm the swallow-and-continue
        // behavior is intentional for this migration.
        Console.WriteLine(e);
    }
    SchemaCreator.UpdateVersion(session, dbid, "0.4");
}
// Schema migration (4.3): converts legacy OADate(double) timestamps into
// DateTime.ToBinary(long) values for every timestamp-bearing table, then
// bumps the schema version.
public void Update(Session session, JET_DBID dbid, Action<string> output)
{
    var tableAndColumns = new[]
    {
        new { Table = "indexes_stats", Column = "last_indexed_timestamp" },
        new { Table = "indexes_stats_reduce", Column = "last_reduced_timestamp" },
        new { Table = "transactions", Column = "timeout" },
        new { Table = "documents", Column = "last_modified" },
        new { Table = "documents_modified_by_transaction", Column = "last_modified" },
        // BUGFIX: this pair used to appear twice; the second pass re-read the
        // already-converted ToBinary() bytes as an OADate double, corrupting them.
        new { Table = "scheduled_reductions", Column = "timestamp" },
        new { Table = "mapped_results", Column = "timestamp" },
        new { Table = "reduce_results", Column = "timestamp" },
        new { Table = "tasks", Column = "added_at" },
    };

    int rows = 0; // cumulative across all tables, drives the transaction pulse
    foreach (var tableAndColumn in tableAndColumns)
    {
        using (var table = new Table(session, dbid, tableAndColumn.Table, OpenTableGrbit.None))
        {
            // Loop-invariant: resolve the column id once per table, not per row.
            var columnid = Api.GetTableColumnid(session, table, tableAndColumn.Column);
            Api.MoveBeforeFirst(session, table);
            while (Api.TryMoveNext(session, table))
            {
                using (var update = new Update(session, table, JET_prep.Replace))
                {
                    var bytes = Api.RetrieveColumn(session, table, columnid);
                    var date = DateTime.FromOADate(BitConverter.ToDouble(bytes, 0));
                    Api.SetColumn(session, table, columnid, date.ToBinary());
                    update.Save();
                }
                // BUGFIX: a stray `continue` previously skipped the commit exactly
                // on the rows matching the modulus, so the transaction was pulsed
                // on every row EXCEPT each 10,000th — the opposite of the intent
                // and of the sibling migrations. Pulse every 10,000 rows instead.
                if (rows++ % 10000 == 0)
                {
                    output("Processed " + (rows - 1) + " rows in " + tableAndColumn.Table);
                    // pulsing transaction
                    Api.JetCommitTransaction(session, CommitTransactionGrbit.None);
                    Api.JetBeginTransaction2(session, BeginTransactionGrbit.None);
                }
            }
        }
        output("Finished processing " + tableAndColumn.Table);
    }
    SchemaCreator.UpdateVersion(session, dbid, "4.3");
}
// Schema migration (4.5): drops the redundant "by_view" indexes across the
// map/reduce tables and replaces the hashed-reduce-key indexes on
// mapped_results and reduce_results with variants whose key ends in +bucket.
public void Update(Session session, JET_DBID dbid, Action<string> output)
{
    using (var table = new Table(session, dbid, "scheduled_reductions", OpenTableGrbit.None))
    {
        Api.JetDeleteIndex(session, table, "by_view");
    }
    using (var table = new Table(session, dbid, "indexed_documents_references", OpenTableGrbit.None))
    {
        Api.JetDeleteIndex(session, table, "by_view");
    }
    using (var table = new Table(session, dbid, "reduce_keys_counts", OpenTableGrbit.None))
    {
        Api.JetDeleteIndex(session, table, "by_view");
    }
    using (var table = new Table(session, dbid, "reduce_keys_status", OpenTableGrbit.None))
    {
        Api.JetDeleteIndex(session, table, "by_view");
    }
    using (var table = new Table(session, dbid, "mapped_results", OpenTableGrbit.None))
    {
        Api.JetDeleteIndex(session, table, "by_view");
        Api.JetDeleteIndex(session, table, "by_view_and_etag");
        Api.JetDeleteIndex(session, table, "by_view_bucket_and_hashed_reduce_key");
        Api.JetDeleteIndex(session, table, "by_view_and_hashed_reduce_key");
        SchemaCreator.CreateIndexes(session, table, new JET_INDEXCREATE
        {
            szIndexName = "by_view_hashed_reduce_key_and_bucket",
            szKey = "+view\0+hashed_reduce_key\0+bucket\0\0",
        });
    }
    using (var table = new Table(session, dbid, "reduce_results", OpenTableGrbit.None))
    {
        Api.JetDeleteIndex(session, table, "by_view");
        Api.JetDeleteIndex(session, table, "by_view_level_bucket_and_hashed_reduce_key");
        SchemaCreator.CreateIndexes(session, table, new JET_INDEXCREATE
        {
            szIndexName = "by_view_level_hashed_reduce_key_and_bucket",
            szKey = "+view\0+level\0+hashed_reduce_key\0+bucket\0\0",
        });
    }
    SchemaCreator.UpdateVersion(session, dbid, "4.5");
}
// Schema migration (5.4): (re)adds the "by_view" index on mapped_results.
public void Update(Session session, JET_DBID dbid, Action<string> output)
{
    using (var table = new Table(session, dbid, "mapped_results", OpenTableGrbit.None))
    {
        SchemaCreator.CreateIndexes(session, table, new JET_INDEXCREATE
        {
            szIndexName = "by_view",
            szKey = "+view\0\0",
            grbit = CreateIndexGrbit.IndexDisallowNull
        });
    }
    SchemaCreator.UpdateVersion(session, dbid, "5.4");
}
// Schema migration (5.2): adds a "by_task_type" index (ignoring nulls) to
// the "tasks" table.
public void Update(Session session, JET_DBID dbid, Action<string> output)
{
    using (var tbl = new Table(session, dbid, "tasks", OpenTableGrbit.None))
    {
        SchemaCreator.CreateIndexes(session, tbl, new JET_INDEXCREATE
        {
            szIndexName = "by_task_type",
            szKey = "+task_type\0\0",
            grbit = CreateIndexGrbit.IndexIgnoreNull
        });
        SchemaCreator.UpdateVersion(session, dbid, "5.2");
    }
}
// Verifies that an entity which cannot be saved makes the persistence
// assertion fail, and that the failure carries the underlying save exception.
public void Persistence_test_fails_for_an_entity_which_cannot_be_saved(EntityWithBadlyNamedProperty entity, ConnectionFactoryProvider factoryProvider, SchemaCreator schemaCreator)
{
    // Arrange
    var connectionFactory = factoryProvider.GetConnectionFactory();

    // Act
    var result = TestPersistence
        .UsingConnectionProvider(connectionFactory)
        .WithSetup(s => CreateSchema(s, schemaCreator))
        .WithEntity(entity)
        .WithEqualityRule(r => r.ForAllOtherProperties());

    // Assert: the persistence assertion itself must fail ...
    Assert.That(() => Assert.That(result, Persisted.Successfully()),
                Throws.InstanceOf<AssertionException>());
    // ... and the failure must come from a save exception.
    Assert.That(result?.SaveException, Is.Not.Null);
}
// Verifies that an entity with an unmapped property makes the persistence
// assertion fail with exactly one failed equality rule.
public void Persistence_test_fails_for_an_incorrectly_mapped_entity(EntityWithUnmappedProperty entity, ConnectionFactoryProvider factoryProvider, SchemaCreator schemaCreator)
{
    // Arrange
    var connectionFactory = factoryProvider.GetConnectionFactory();

    // Act
    var result = TestPersistence
        .UsingConnectionProvider(connectionFactory)
        .WithSetup(s => CreateSchema(s, schemaCreator))
        .WithEntity(entity)
        .WithEqualityRule(r => r.ForAllOtherProperties());

    // Assert: the persistence assertion itself must fail ...
    Assert.That(() => Assert.That(result, Persisted.Successfully()),
                Throws.InstanceOf<AssertionException>());
    // ... and exactly one equality rule must have failed.
    Assert.That(result?.EqualityResult?.RuleResults?.Count(x => !x.Passed), Is.EqualTo(1));
}
// Verifies that a correctly mapped entity round-trips through persistence
// without any assertion failure.
public void Persistence_test_passes_for_a_correctly_mapped_entity(SampleEntity entity, SessionFactoryProvider factoryProvider, SchemaCreator schemaCreator)
{
    // Arrange
    var sessionFactory = factoryProvider.GetSessionFactory();

    // Act
    var result = TestPersistence
        .UsingSessionFactory(sessionFactory)
        .WithSetup(s => schemaCreator.CreateSchema(s.Connection))
        .WithEntity(entity)
        .WithEqualityRule(r => r.ForAllOtherProperties());

    // Assert
    Assert.That(() => Assert.That(result, Persisted.Successfully()), Throws.Nothing);
}
// constructor for testing purposes
// Builds a MetaModel with an injected SchemaCreator; when registerGlobally
// is set, the first instance constructed becomes the process-wide Default
// (guarded by the shared lock).
internal MetaModel(SchemaCreator schemaCreator, bool registerGlobally)
{
    // Create a readonly wrapper for handing out
    _tablesRO = new ReadOnlyCollection<MetaTable>(_tables);
    _schemaCreator = schemaCreator;
    _registerGlobally = registerGlobally;
    // Don't touch Default.Model when we're not using global registration
    if (registerGlobally)
    {
        lock (_lock)
        {
            // First globally-registered model wins.
            if (Default == null)
            {
                Default = this;
            }
        }
    }
}
// Schema migration (3.9): removes task-merging support from the "tasks"
// table — drops the "mergables_by_task_type" index (historical, misspelled
// name) and the "supports_merging" column, then adds the
// (for_index, task_type) index used for per-index task lookup.
public void Update(Session session, JET_DBID dbid)
{
    using (var tasks = new Table(session, dbid, "tasks", OpenTableGrbit.None))
    {
        Api.JetDeleteIndex(session, tasks, "mergables_by_task_type");
        Api.JetDeleteColumn(session, tasks, "supports_merging");
        SchemaCreator.CreateIndexes(session, tasks, new JET_INDEXCREATE
        {
            szIndexName = "by_index_and_task_type",
            szKey = "+for_index\0+task_type\0\0",
            grbit = CreateIndexGrbit.IndexIgnoreNull,
        });
    }
    SchemaCreator.UpdateVersion(session, dbid, "3.9");
}
// Schema migration (5.1, idempotent variant): adds a nullable "created_at"
// DateTime column to "lists" when missing, stamps every existing row with
// the current UTC time (pulsing the transaction every 10,000 rows), and
// creates the (name, created_at) index.
public void Update(Session session, JET_DBID dbid, Action<string> output)
{
    using (var tbl = new Table(session, dbid, "lists", OpenTableGrbit.None))
    {
        JET_COLUMNID columnid;
        var columnids = Api.GetColumnDictionary(session, tbl);
        if (columnids.ContainsKey("created_at") == false)
        {
            Api.JetAddColumn(session, tbl, "created_at", new JET_COLUMNDEF
            {
                coltyp = JET_coltyp.DateTime,
                grbit = ColumndefGrbit.ColumnMaybeNull,
            }, null, 0, out columnid);
        }

        // FIX: resolve the column id once before the loop — the lookup is
        // loop-invariant and used to run once per row.
        var createdAt = Api.GetTableColumnid(session, tbl, "created_at");

        int rows = 0;
        if (Api.TryMoveFirst(session, tbl))
        {
            do
            {
                using (var update = new Update(session, tbl, JET_prep.Replace))
                {
                    Api.SetColumn(session, tbl, createdAt, SystemTime.UtcNow);
                    update.Save();
                }
                // Pulse the transaction every 10,000 rows to bound the version store.
                if (rows++ % 10000 == 0)
                {
                    output("Processed " + (rows) + " rows in lists");
                    Api.JetCommitTransaction(session, CommitTransactionGrbit.LazyFlush);
                    Api.JetBeginTransaction2(session, BeginTransactionGrbit.None);
                }
            } while (Api.TryMoveNext(session, tbl));
        }

        SchemaCreator.CreateIndexes(session, tbl, new JET_INDEXCREATE
        {
            szIndexName = "by_name_and_created_at",
            szKey = "+name\0+created_at\0\0",
            grbit = CreateIndexGrbit.IndexDisallowNull
        });
        SchemaCreator.UpdateVersion(session, dbid, "5.1");
    }
}
// Initializes the storage, honoring the "Raven/RunInMemory" setting:
// in-memory storage when it parses to true, on-disk storage otherwise.
public void Initialize()
{
    // Unparseable/missing setting yields false (on-disk storage).
    bool runInMemory;
    bool.TryParse(settings["Raven/RunInMemory"], out runInMemory);

    StorageEnvironmentOptions storageOptions;
    if (runInMemory)
    {
        storageOptions = StorageEnvironmentOptions.CreateMemoryOnly();
    }
    else
    {
        storageOptions = CreateStorageOptionsFromConfiguration(path, settings);
    }

    tableStorage = new TableStorage(storageOptions, bufferPool);

    var creator = new SchemaCreator(configuration, tableStorage, Output, Log);
    creator.CreateSchema();
    creator.SetupDatabaseIdAndSchemaVersion();
    creator.UpdateSchemaIfNecessary();

    SetupDatabaseId();
    idGenerator = new IdGenerator(tableStorage);
}
// Schema migration (3.9): removes task-merging support from the "tasks"
// table and adds the (for_index, task_type) index.
public void Update(Session session, JET_DBID dbid, Action<string> output)
{
    using (var tasks = new Table(session, dbid, "tasks", OpenTableGrbit.None))
    {
        // NOTE: the typo in the following string must stay because that's how the original index was named.
        Api.JetDeleteIndex(session, tasks, "mergables_by_task_type");
        Api.JetDeleteColumn(session, tasks, "supports_merging");
        SchemaCreator.CreateIndexes(session, tasks, new JET_INDEXCREATE
        {
            szIndexName = "by_index_and_task_type",
            szKey = "+for_index\0+task_type\0\0",
            grbit = CreateIndexGrbit.IndexIgnoreNull,
        });
    }
    SchemaCreator.UpdateVersion(session, dbid, "3.9");
}
// Schema migration (4.7): drops the transaction tables entirely and replaces
// the scheduled_reductions hashed-reduce-key indexes with a single variant
// whose key ends in +bucket.
public void Update(Session session, JET_DBID dbid, Action<string> output)
{
    Api.JetDeleteTable(session, dbid, "transactions");
    Api.JetDeleteTable(session, dbid, "documents_modified_by_transaction");

    using (var sr = new Table(session, dbid, "scheduled_reductions", OpenTableGrbit.None))
    {
        Api.JetDeleteIndex(session, sr, "by_view_level_and_hashed_reduce_key");
        Api.JetDeleteIndex(session, sr, "by_view_level_bucket_and_hashed_reduce_key");
        SchemaCreator.CreateIndexes(session, sr, new JET_INDEXCREATE
        {
            szIndexName = "by_view_level_and_hashed_reduce_key_and_bucket",
            szKey = "+view\0+level\0+hashed_reduce_key\0+bucket\0\0",
        });
    }
    SchemaCreator.UpdateVersion(session, dbid, "4.7");
}
// Integration test: copies the DB schema to disk, verifies schema and
// checksum equality, then verifies that a second copy propagates updates
// (changed procedure text) and deletions (dropped table). Cleans up the
// temp directory in a finally block.
public void TestCopySchema()
{
    SchemaCreator sc = new SchemaCreator(this);
    sc.Create();

    string msg2 = "Hello, world, this used to be Func1!";
    string func1q2 = "create procedure Func1 as select '" + msg2 + "'\n";

    VxSchema origschema = dbus.Get();
    VxSchemaChecksums origsums = dbus.GetChecksums();

    string tmpdir = GetTempDir();
    try
    {
        Directory.CreateDirectory(tmpdir);
        VxDiskSchema disk = new VxDiskSchema(tmpdir);

        // Test that the copy function will create new elements
        VxSchema.CopySchema(dbus, disk);

        VxSchema newschema = disk.Get(null);
        VxSchemaChecksums newsums = disk.GetChecksums();

        WVPASS(1);
        TestSchemaEquality(origschema, newschema);
        WVPASS(2);
        TestChecksumEquality(origsums, newsums);
        WVPASS(3);

        // Test that the copy function updates changed elements, and
        // deletes old ones.
        origschema["Procedure/Func1"].text = func1q2;
        dbus.Put(origschema, null, VxPutOpts.None);
        dbus.DropSchema("Table/Tab2");
        origschema.Remove("Table/Tab2");
        origsums = dbus.GetChecksums();

        VxSchema.CopySchema(dbus, disk);

        newschema = disk.Get(null);
        newsums = disk.GetChecksums();

        WVPASS(4);
        TestSchemaEquality(origschema, newschema);
        WVPASS(5);
        TestChecksumEquality(origsums, newsums);
        WVPASS(6);
    }
    finally
    {
        Directory.Delete(tmpdir, true);
    }
    WVPASS(!Directory.Exists(tmpdir));

    sc.Cleanup();
}
// Integration test: drops several schema elements, verifies their checksums
// disappear (while an untouched table survives), and verifies that dropping
// an already-dropped procedure produces the expected error.
public void TestDropSchema()
{
    SchemaCreator sc = new SchemaCreator(this);
    sc.Create();

    VxSchemaChecksums sums = dbus.GetChecksums();

    WVASSERT(sums.ContainsKey("Procedure/Func1"));
    WVASSERT(sums.ContainsKey("ScalarFunction/Func2"));
    WVASSERT(sums.ContainsKey("Table/Tab1"));
    WVASSERT(sums.ContainsKey("Table/Tab2"));
    WVASSERT(sums.ContainsKey("XMLSchema/TestSchema"));

    dbus.DropSchema("Procedure/Func1", "ScalarFunction/Func2", "Table/Tab2", "XMLSchema/TestSchema");

    sums = dbus.GetChecksums();

    WVASSERT(!sums.ContainsKey("Procedure/Func1"));
    WVASSERT(!sums.ContainsKey("ScalarFunction/Func2"));
    WVASSERT(sums.ContainsKey("Table/Tab1"));
    WVASSERT(!sums.ContainsKey("Table/Tab2"));
    WVASSERT(!sums.ContainsKey("XMLSchema/TestSchema"));

    // Dropping a non-existent element must report exactly one error.
    VxSchemaErrors errs = dbus.DropSchema("Procedure/Func1");
    WVPASSEQ(errs.Count, 1);
    WVPASSEQ(errs["Procedure/Func1"][0].msg,
        "Cannot drop the procedure 'Func1', because it does not exist " +
        "or you do not have permission.");

    sc.Cleanup();
}
// Integration test: puts each schema element type (table, table function,
// procedure, trigger, view, XML schema), reads them back, verifies an
// idempotent re-put, verifies that destructive column drops are refused
// without the Destructive option, and verifies procedure text replacement.
public void TestPutSchema()
{
    SchemaCreator sc = new SchemaCreator(this);

    VxPutOpts no_opts = VxPutOpts.None;
    WVPASSEQ(VxPutSchema("Table", "Tab1", sc.tab1sch, no_opts), null);
    WVPASSEQ(VxPutSchema("TableFunction", "TabFunc1", sc.tabfuncq, no_opts), null);
    WVPASSEQ(VxPutSchema("Procedure", "Func1", sc.func1q, no_opts), null);
    WVPASSEQ(VxPutSchema("Trigger", "Trigger1", sc.triggerq, no_opts), null);
    WVPASSEQ(VxPutSchema("View", "View1", sc.viewq, no_opts), null);
    WVPASSEQ(VxPutSchema("XMLSchema", "TestSchema", sc.xmlq, no_opts), null);

    // Read everything back and verify name/type/text round-tripped.
    VxSchema schema = dbus.Get();

    WVASSERT(schema.ContainsKey("Procedure/Func1"));
    WVPASSEQ(schema["Procedure/Func1"].name, "Func1");
    WVPASSEQ(schema["Procedure/Func1"].type, "Procedure");
    WVPASSEQ(schema["Procedure/Func1"].text, sc.func1q);

    WVASSERT(schema.ContainsKey("Table/Tab1"));
    WVPASSEQ(schema["Table/Tab1"].name, "Tab1");
    WVPASSEQ(schema["Table/Tab1"].type, "Table");
    WVPASSEQ(schema["Table/Tab1"].text, sc.tab1sch);

    WVASSERT(schema.ContainsKey("TableFunction/TabFunc1"));
    WVPASSEQ(schema["TableFunction/TabFunc1"].name, "TabFunc1");
    WVPASSEQ(schema["TableFunction/TabFunc1"].type, "TableFunction");
    WVPASSEQ(schema["TableFunction/TabFunc1"].text, sc.tabfuncq);

    WVASSERT(schema.ContainsKey("Trigger/Trigger1"));
    WVPASSEQ(schema["Trigger/Trigger1"].name, "Trigger1");
    WVPASSEQ(schema["Trigger/Trigger1"].type, "Trigger");
    WVPASSEQ(schema["Trigger/Trigger1"].text, sc.triggerq);

    WVASSERT(schema.ContainsKey("View/View1"));
    WVPASSEQ(schema["View/View1"].name, "View1");
    WVPASSEQ(schema["View/View1"].type, "View");
    WVPASSEQ(schema["View/View1"].text, sc.viewq);

    WVASSERT(schema.ContainsKey("XMLSchema/TestSchema"));
    WVPASSEQ(schema["XMLSchema/TestSchema"].name, "TestSchema");
    WVPASSEQ(schema["XMLSchema/TestSchema"].type, "XMLSchema");
    WVPASSEQ(schema["XMLSchema/TestSchema"].text, sc.xmlq);

    // Check that putting again with no changes doesn't cause errors, even
    // without the destructive option.
    WVPASSEQ(VxPutSchema("Table", "Tab1", sc.tab1sch, no_opts), null);
    WVPASSEQ(VxPutSchema("TableFunction", "TabFunc1", sc.tabfuncq, no_opts), null);
    WVPASSEQ(VxPutSchema("Procedure", "Func1", sc.func1q, no_opts), null);
    WVPASSEQ(VxPutSchema("Trigger", "Trigger1", sc.triggerq, no_opts), null);
    WVPASSEQ(VxPutSchema("View", "View1", sc.viewq, no_opts), null);
    WVPASSEQ(VxPutSchema("XMLSchema", "TestSchema", sc.xmlq, no_opts), null);

    // A schema that would drop columns must be refused without Destructive.
    string tab1sch2 = "column: name=f4,type=binary,null=0,length=1\n";

    VxSchemaError err = VxPutSchema("Table", "Tab1", tab1sch2, no_opts);
    WVPASS(err != null);
    WVPASSEQ(err.key, "Table/Tab1");
    WVPASSEQ(err.msg, "Refusing to drop columns ([f1], [f2], [f3]) when the destructive option is not set.");
    WVPASSEQ(err.errnum, -1);

    // The refused put must have left the original schema in place.
    schema = dbus.Get("Table/Tab1");
    WVPASSEQ(schema["Table/Tab1"].name, "Tab1");
    WVPASSEQ(schema["Table/Tab1"].type, "Table");
    WVPASSEQ(schema["Table/Tab1"].text, sc.tab1sch);

    // With Destructive set, the same put must succeed.
    WVPASSEQ(VxPutSchema("Table", "Tab1", tab1sch2, VxPutOpts.Destructive), null);

    schema = dbus.Get("Table/Tab1");
    WVPASSEQ(schema["Table/Tab1"].name, "Tab1");
    WVPASSEQ(schema["Table/Tab1"].type, "Table");
    WVPASSEQ(schema["Table/Tab1"].text, tab1sch2);

    // Replacing procedure text must round-trip the new body.
    string msg2 = "This is definitely not the Func1 you thought you knew.";
    string func1q2 = "create procedure Func1 as select '" + msg2 + "'";
    WVPASSEQ(VxPutSchema("Procedure", "Func1", func1q2, no_opts), null);

    schema = dbus.Get("Procedure/Func1");
    WVPASSEQ(schema["Procedure/Func1"].name, "Func1");
    WVPASSEQ(schema["Procedure/Func1"].type, "Procedure");
    WVPASSEQ(schema["Procedure/Func1"].text, func1q2);

    sc.Cleanup();
}
public void TestSchemaData() { SchemaCreator sc = new SchemaCreator(this); WVPASSEQ(VxPutSchema("Table", "Tab1", sc.tab1sch, VxPutOpts.None), null); var inserts = new List<string>(); var rows = new List<string>(); // headings string heading = "\"f1\",\"f2\",\"f3\""; rows.Add(heading); for (int ii = 21; ii >= 0; ii--) inserts.Add(wv.fmt("INSERT INTO Tab1 ([f1],[f2],[f3]) " + "VALUES ({0},{1},'{2}');", ii, ii + ".3400", "Hi" + ii)); // The rows will come back sorted alphabetically. int[] order = new int[] { 0, 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 2, 20, 21, 3, 4, 5, 6, 7, 8, 9 }; foreach (int ii in order) rows.Add(wv.fmt("{0},{1},{2}", ii, ii + ".3400", "Hi" + ii)); /* inserts.Add("INSERT INTO Tab1 ([f1],[f2],[f3]) " + "VALUES (9501,123.4567," + "'This string''s good for \"testing\" escaping, isn''t it?');");*/ inserts.Add("INSERT INTO Tab1 ([f1],[f2],[f3]) " + "VALUES (9500,234.5678,NULL);"); rows.Add("9500,234.5678,"); /* rows.Add("9501,123.4567," + "\"This string''s good for \\\"testing\\\" escaping, isn't it?\"");*/ // terminating blank rows.Add(""); foreach (string ins in inserts) WVASSERT(VxExec(ins)); string[] newrows = dbus.GetSchemaData("Tab1", 0, "", null, null).split("\n"); WVPASSEQ(newrows.Length, rows.Count); for (int i = 0; i < newrows.Length; i++) WVPASSEQ(newrows[i], rows[i]); VxExec("drop table Tab1"); try { WVEXCEPT(dbus.GetSchemaData("Tab1", 0, "", null, null)); } catch (Wv.Test.WvAssertionFailure e) { throw e; } catch (System.Exception e) { WVPASS(e is WvDbusError); WVPASSEQ(e.Message, "vx.db.sqlerror: Invalid object name 'Tab1'."); log.print(e.ToString() + "\n"); } WVPASSEQ(VxPutSchema("Table", "Tab1", sc.tab1sch, VxPutOpts.None), null); WVPASSEQ(dbus.GetSchemaData("Tab1", 0, "", null, null), heading + "\n"); dbus.PutSchemaData("Tab1", inserts.join(""), 0); WVPASSEQ(dbus.GetSchemaData("Tab1", 0, "", null, null), rows.join("\n")); WVPASSEQ(dbus.GetSchemaData("Tab1", 0, "f1 = 11", null, null), heading + "\n11,11.3400,Hi11\n"); sc.Cleanup(); }
// Verifies that a schema diff (computed from two checksum sets) is ordered
// Remove -> Add -> Change, that GetDiffElements extracts exactly the changed
// elements, and that Put-ing the diff onto the given backend brings it up
// to date. Runs against any ISchemaBackend implementation.
public void TestApplySchemaDiff(ISchemaBackend backend)
{
    log.print("In TestApplySchemaDiff({0})\n", backend.GetType().ToString());
    SchemaCreator sc = new SchemaCreator(this);
    sc.Create();

    string msg2 = "Hello, world, this used to be Func1!";
    string func1q2 = "create procedure Func1 as select '" + msg2 + "'\n";

    // Baseline schema + checksums, and working copies to mutate.
    VxSchema origschema = dbus.Get();
    VxSchemaChecksums origsums = dbus.GetChecksums();
    VxSchema newschema = new VxSchema(origschema);
    VxSchemaChecksums newsums = new VxSchemaChecksums(origsums);

    // Don't bother putting the data again if we're talking to dbus:
    // we already snuck it in the back door.
    if (backend != dbus)
        backend.Put(origschema, origsums, VxPutOpts.None);

    VxSchemaChecksums diffsums = new VxSchemaChecksums(newsums);

    // Make some changes to create an interesting diff.
    // Change the text and sums of Func1, schedule TestSchema for
    // deletion, and act like Tab2 is new.
    newschema["Procedure/Func1"].text = func1q2;
    newsums.AddSum("Procedure/Func1", 123);
    newsums.Remove("XMLSchema/TestSchema");
    origsums.Remove("Table/Tab2");
    WVASSERT(VxExec("drop table Tab2"));

    VxSchemaDiff diff = new VxSchemaDiff(origsums, newsums);

    // The diff enumerates in a fixed order: removals, additions, changes.
    using (IEnumerator<KeyValuePair<string,VxDiffType>> iter = diff.GetEnumerator())
    {
        WVPASS(iter.MoveNext());
        WVPASSEQ(iter.Current.Key, "XMLSchema/TestSchema");
        WVPASSEQ((char)iter.Current.Value, (char)VxDiffType.Remove);
        WVPASS(iter.MoveNext());
        WVPASSEQ(iter.Current.Key, "Table/Tab2");
        WVPASSEQ((char)iter.Current.Value, (char)VxDiffType.Add);
        WVPASS(iter.MoveNext());
        WVPASSEQ(iter.Current.Key, "Procedure/Func1");
        WVPASSEQ((char)iter.Current.Value, (char)VxDiffType.Change);
        WVFAIL(iter.MoveNext());
    }

    // Extract just the diffed elements; a removed element has empty text.
    VxSchema diffschema = newschema.GetDiffElements(diff);
    WVPASSEQ(diffschema["XMLSchema/TestSchema"].type, "XMLSchema");
    WVPASSEQ(diffschema["XMLSchema/TestSchema"].name, "TestSchema");
    WVPASSEQ(diffschema["XMLSchema/TestSchema"].text, "");
    WVPASSEQ(diffschema["Table/Tab2"].type, "Table");
    WVPASSEQ(diffschema["Table/Tab2"].name, "Tab2");
    WVPASSEQ(diffschema["Table/Tab2"].text, sc.tab2sch);
    WVPASSEQ(diffschema["Procedure/Func1"].type, "Procedure");
    WVPASSEQ(diffschema["Procedure/Func1"].name, "Func1");
    WVPASSEQ(diffschema["Procedure/Func1"].text, func1q2);

    // Applying the diff must succeed and leave the backend matching newschema.
    VxSchemaErrors errs = backend.Put(diffschema, diffsums, VxPutOpts.None);
    WVPASSEQ(errs.Count, 0);

    VxSchema updated = backend.Get(null);
    WVASSERT(!updated.ContainsKey("XMLSchema/TestSchema"));
    WVPASSEQ(updated["Table/Tab1"].text, newschema["Table/Tab1"].text);
    WVPASSEQ(updated["Table/Tab2"].text, newschema["Table/Tab2"].text);
    WVPASSEQ(updated["Procedure/Func1"].text,
        newschema["Procedure/Func1"].text);

    sc.Cleanup();
}
public void TestPutSchemaErrors() { //WvLog.maxlevel = WvLog.L.Debug4; WVPASS("hello"); SchemaCreator sc = new SchemaCreator(this); sc.Create(); // Check that putting the same elements doesn't cause errors VxPutOpts no_opts = VxPutOpts.None; WVPASS("getting"); VxSchema schema = dbus.Get(); WVPASS("putting"); VxSchemaErrors errs = VxPutSchema(schema, no_opts); WVPASSEQ(errs.Count, 0); // Check that invalid SQL causes errors. schema = new VxSchema(); schema.Add("ScalarFunction", "ErrSF", "I am not valid SQL", false); schema.Add("TableFunction", "ErrTF", "I'm not valid SQL either", false); errs = VxPutSchema(schema, no_opts); log.print("Results: \n{0}", errs.ToString()); log.print("Done results.\n"); WVPASSEQ(errs.Count, 2); WVPASSEQ(errs["ScalarFunction/ErrSF"][0].key, "ScalarFunction/ErrSF"); WVPASSEQ(errs["ScalarFunction/ErrSF"][0].msg, "Incorrect syntax near the keyword 'not'."); WVPASSEQ(errs["ScalarFunction/ErrSF"][0].errnum, 156); WVPASSEQ(errs["ScalarFunction/ErrSF"].Count, 1); WVPASSEQ(errs["TableFunction/ErrTF"][0].key, "TableFunction/ErrTF"); WVPASSEQ(errs["TableFunction/ErrTF"][0].msg, "Unclosed quotation mark after the character string 'm not valid SQL either'."); WVPASSEQ(errs["TableFunction/ErrTF"][0].errnum, 105); WVPASSEQ(errs["TableFunction/ErrTF"].Count, 1); sc.Cleanup(); }
public void TestReadChecksums() { SchemaCreator sc = new SchemaCreator(this); sc.Create(); string tmpdir = GetTempDir(); DirectoryInfo tmpdirinfo = new DirectoryInfo(tmpdir); try { tmpdirinfo.Create(); VxSchema schema = dbus.Get(); VxSchemaChecksums sums = dbus.GetChecksums(); VxDiskSchema backend = new VxDiskSchema(tmpdir); backend.Put(schema, sums, VxPutOpts.None); VxSchemaChecksums fromdisk = backend.GetChecksums(); foreach (KeyValuePair<string, VxSchemaChecksum> p in sums) WVPASSEQ(p.Value.GetSumString(), fromdisk[p.Key].GetSumString()); WVPASSEQ(sums.Count, fromdisk.Count); // Test that changing a file invalidates its checksums, and that // we skip directories named "DATA" using (StreamWriter sw = File.AppendText( wv.PathCombine(tmpdir, "Table", "Tab1"))) { sw.WriteLine("Ooga Booga"); } Directory.CreateDirectory(Path.Combine(tmpdir, "DATA")); File.WriteAllText(wv.PathCombine(tmpdir, "DATA", "Decoy"), "Decoy file, shouldn't have checksums"); VxSchemaChecksums mangled = backend.GetChecksums(); // Check that the decoy file didn't get read WVFAIL(mangled.ContainsKey("DATA/Decoy")); // Check that the mangled checksums exist, but are empty. WVASSERT(mangled.ContainsKey("Table/Tab1")); WVASSERT(mangled["Table/Tab1"].GetSumString() != sums["Table/Tab1"].GetSumString()); WVPASSEQ(mangled["Table/Tab1"].GetSumString(), ""); // Check that everything else is still sensible foreach (KeyValuePair<string, VxSchemaChecksum> p in sums) { if (p.Key != "Table/Tab1") WVPASSEQ(p.Value.GetSumString(), mangled[p.Key].GetSumString()); } } finally { tmpdirinfo.Delete(true); sc.Cleanup(); } WVASSERT(!tmpdirinfo.Exists); }
// Verifies retrieval of XML schema collections, including one longer than
// 8000 characters to exercise the 4000-character chunked retrieval path,
// and checks both name-limited and unlimited Get() queries.
// Fix: the statement `WVASSERT(schema.Count >= 2)` was missing its
// terminating semicolon (a compile error); it is restored below.
public void TestGetXmlSchemas()
{
    SchemaCreator sc = new SchemaCreator(this);

    // Best-effort cleanup of leftovers from earlier runs; failures here
    // (collection doesn't exist) are deliberately ignored.
    try { VxExec("drop xml schema collection TestSchema"); } catch { }
    try { VxExec("drop xml schema collection TestSchema2"); } catch { }

    string query1 = sc.xmlq;
    WVASSERT(VxExec(query1));

    // Make a long XML Schema, to test the 4000-character chunking
    string query2 = "\nCREATE XML SCHEMA COLLECTION [dbo].[TestSchema2] AS " +
        "'<xsd:schema xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\">" +
        "<xsd:element name=\"Employee\">" +
        "<xsd:complexType>" +
        "<xsd:complexContent>" +
        "<xsd:restriction base=\"xsd:anyType\">" +
        "<xsd:sequence>";
    // Pad with repeated element declarations until we're past 8000 chars,
    // i.e. more than two 4000-char chunks.
    while (query2.Length < 8000)
        query2 += "<xsd:element name=\"SIN\" type=\"xsd:string\"/>" +
            "<xsd:element name=\"Name\" type=\"xsd:string\"/>" +
            "<xsd:element name=\"DateOfBirth\" type=\"xsd:date\"/>" +
            "<xsd:element name=\"EmployeeType\" type=\"xsd:string\"/>" +
            "<xsd:element name=\"Salary\" type=\"xsd:long\"/>";
    query2 += "</xsd:sequence>" +
        "</xsd:restriction>" +
        "</xsd:complexContent>" +
        "</xsd:complexType>" +
        "</xsd:element>" +
        "</xsd:schema>'\n";
    WVASSERT(VxExec(query2));

    // Test that the query limiting works
    VxSchema schema = dbus.Get("TestSchema");
    WVPASSEQ(schema.Count, 1);
    WVASSERT(schema.ContainsKey("XMLSchema/TestSchema"));
    WVPASSEQ(schema["XMLSchema/TestSchema"].name, "TestSchema");
    WVPASSEQ(schema["XMLSchema/TestSchema"].type, "XMLSchema");
    WVPASSEQ(schema["XMLSchema/TestSchema"].encrypted, false);
    WVPASSEQ(schema["XMLSchema/TestSchema"].text, query1);

    // The fully-qualified "Type/Name" form limits the query identically.
    schema = dbus.Get("XMLSchema/TestSchema");
    WVPASSEQ(schema.Count, 1);
    WVASSERT(schema.ContainsKey("XMLSchema/TestSchema"));
    WVPASSEQ(schema["XMLSchema/TestSchema"].name, "TestSchema");
    WVPASSEQ(schema["XMLSchema/TestSchema"].type, "XMLSchema");
    WVPASSEQ(schema["XMLSchema/TestSchema"].encrypted, false);
    WVPASSEQ(schema["XMLSchema/TestSchema"].text, query1);

    // Also check that unlimited queries get everything
    schema = dbus.Get();
    WVASSERT(schema.Count >= 2); // fixed: missing semicolon
    WVASSERT(schema.ContainsKey("XMLSchema/TestSchema"));
    WVPASSEQ(schema["XMLSchema/TestSchema"].name, "TestSchema");
    WVPASSEQ(schema["XMLSchema/TestSchema"].type, "XMLSchema");
    WVPASSEQ(schema["XMLSchema/TestSchema"].encrypted, false);
    WVPASSEQ(schema["XMLSchema/TestSchema"].text, query1);
    WVASSERT(schema.ContainsKey("XMLSchema/TestSchema2"));
    WVPASSEQ(schema["XMLSchema/TestSchema2"].name, "TestSchema2");
    WVPASSEQ(schema["XMLSchema/TestSchema2"].type, "XMLSchema");
    WVPASSEQ(schema["XMLSchema/TestSchema2"].encrypted, false);
    WVPASSEQ(schema["XMLSchema/TestSchema2"].text, query2);

    WVASSERT(VxExec("drop xml schema collection TestSchema"));
    WVASSERT(VxExec("drop xml schema collection TestSchema2"));
}
// Exercises VxDiskSchema export: baseline file counts, failure on missing
// checksums, normal export, idempotent re-export, round-trip equality, and
// two generations of backup-mode export.
// Fix: rethrow assertion failures with "throw;" instead of "throw e;"
// so the original stack trace is preserved.
public void TestExportSchema()
{
    SchemaCreator sc = new SchemaCreator(this);

    string tmpdir = GetTempDir();
    DirectoryInfo tmpdirinfo = new DirectoryInfo(tmpdir);
    try
    {
        tmpdirinfo.Create();

        // Establish a baseline for the number of existing elements.
        VxSchema schema = dbus.Get();
        VxSchemaChecksums sums = dbus.GetChecksums();
        VxDiskSchema disk = new VxDiskSchema(tmpdir);
        disk.Put(schema, sums, VxPutOpts.None);
        var base_filecounts = GetFileCounts(tmpdir);

        // Clobber the directory and start fresh.
        tmpdirinfo.Delete(true);
        tmpdirinfo.Create();

        // Now create our test schema and do the real tests.
        sc.Create();

        // Check that having mangled checksums fails
        schema = dbus.Get();
        sums = new VxSchemaChecksums();
        disk = new VxDiskSchema(tmpdir);
        try {
            WVEXCEPT(disk.Put(schema, sums, VxPutOpts.None));
        } catch (Wv.Test.WvAssertionFailure) {
            // Let test-framework assertion failures propagate untouched;
            // "throw;" keeps the original stack trace (unlike "throw e;").
            throw;
        } catch (System.Exception e) {
            WVPASS(e is ArgumentException);
            WVPASS(e.Message.StartsWith("Missing checksum for "));
            log.print(e.ToString() + "\n");
        }

        // Check that the normal exporting works.
        schema = dbus.Get();
        sums = dbus.GetChecksums();
        WVPASSEQ(schema.Count, sums.Count);
        disk.Put(schema, sums, VxPutOpts.None);

        int backup_generation = 0;
        VerifyExportedSchema(tmpdir, schema, sums, sc, backup_generation,
            base_filecounts);

        // Check that we read back the same stuff
        VxSchema schemafromdisk = disk.Get(null);
        VxSchemaChecksums sumsfromdisk = disk.GetChecksums();

        WVPASS(1);
        TestSchemaEquality(schema, schemafromdisk);
        TestChecksumEquality(sums, sumsfromdisk);
        WVPASS(2);

        // Doing it twice doesn't change anything.
        disk.Put(schema, sums, VxPutOpts.None);
        VerifyExportedSchema(tmpdir, schema, sums, sc, backup_generation,
            base_filecounts);
        WVPASS(3);

        // Check backup mode
        disk.Put(schema, sums, VxPutOpts.IsBackup);
        backup_generation++;
        VerifyExportedSchema(tmpdir, schema, sums, sc, backup_generation,
            base_filecounts);
        WVPASS(4);

        // Check backup mode again
        disk.Put(schema, sums, VxPutOpts.IsBackup);
        backup_generation++;
        VerifyExportedSchema(tmpdir, schema, sums, sc, backup_generation,
            base_filecounts);
        WVPASS(5);
    }
    finally
    {
        // Always remove the temp directory, even on assertion failure.
        tmpdirinfo.Delete(true);
        sc.Cleanup();
    }
    WVASSERT(!tmpdirinfo.Exists);
}
// Checks the on-disk layout produced by a VxDiskSchema export: one
// subdirectory per element type (Index deliberately absent), the expected
// per-type file counts (scaled by the number of backup generations), and
// the exact "!!SCHEMAMATIC <md5> <checksum> " header plus body of each file.
// backupnum == 0 means a plain export (no filename suffix); backup N uses
// a "-N" suffix and multiplies every expected file count by N+1.
private void VerifyExportedSchema(string exportdir, VxSchema schema,
    VxSchemaChecksums sums, SchemaCreator sc, int backupnum,
    Dictionary<string, int> base_filecounts)
{
    // Each backup generation re-exports every file, hence the multiplier.
    int filemultiplier = backupnum + 1;
    string suffix = backupnum == 0 ? "" : "-" + backupnum;

    string procdir = Path.Combine(exportdir, "Procedure");
    string scalardir = Path.Combine(exportdir, "ScalarFunction");
    string idxdir = Path.Combine(exportdir, "Index");
    string tabdir = Path.Combine(exportdir, "Table");
    string tabfuncdir = Path.Combine(exportdir, "TableFunction");
    string triggerdir = Path.Combine(exportdir, "Trigger");
    string viewdir = Path.Combine(exportdir, "View");
    string xmldir = Path.Combine(exportdir, "XMLSchema");

    // No loose files at the top level; everything lives in type directories.
    WVPASSEQ(Directory.GetFiles(exportdir).Length, 0);
    WVPASS(Directory.Exists(procdir));
    WVPASS(Directory.Exists(scalardir));
    // We no longer store indexes in a separate directory; make sure
    // that directory doesn't get created.
    WVPASS(!Directory.Exists(idxdir));
    WVPASS(Directory.Exists(tabdir));
    WVPASS(Directory.Exists(tabfuncdir));
    WVPASS(Directory.Exists(triggerdir));
    WVPASS(Directory.Exists(viewdir));
    WVPASS(Directory.Exists(xmldir));
    WVPASSEQ(Directory.GetDirectories(exportdir).Length, 7);

    Dictionary<string, int> filecounts = GetFileCounts(exportdir);

    // Procedures
    WVPASSEQ(Directory.GetDirectories(procdir).Length, 0);
    WVPASSEQ(filecounts["Procedure"],
        (1 + base_filecounts["Procedure"]) * filemultiplier);
    string func1file = Path.Combine(procdir, "Func1" + suffix);
    CheckExportedFileContents(func1file,
        "!!SCHEMAMATIC 2ae46ac0748aede839fb9cd167ea1180 0xd983a305 ",
        sc.func1q);

    // Scalar functions
    WVPASSEQ(Directory.GetDirectories(scalardir).Length, 0);
    WVPASSEQ(filecounts["ScalarFunction"],
        (1 + base_filecounts["ScalarFunction"]) * filemultiplier);
    string func2file = Path.Combine(scalardir, "Func2" + suffix);
    CheckExportedFileContents(func2file,
        "!!SCHEMAMATIC c7c257ba4f7817e4e460a3cef0c78985 0xd6fe554f ",
        sc.func2q);

    // Table-valued functions
    WVPASSEQ(Directory.GetDirectories(tabfuncdir).Length, 0);
    WVPASSEQ(filecounts["TableFunction"],
        (1 + base_filecounts["TableFunction"]) * filemultiplier);
    string tabfunc1file = Path.Combine(tabfuncdir, "TabFunc1" + suffix);
    CheckExportedFileContents(tabfunc1file,
        "!!SCHEMAMATIC 1d3f1392a80e44876254209feebe7860 0x4b96fbe4 ",
        sc.tabfuncq);

    // Tables
    // Table checksums vary per run, so the expected header is built from
    // the checksums passed in rather than a hard-coded value.
    WVPASSEQ(Directory.GetDirectories(tabdir).Length, 0);
    WVPASSEQ(filecounts["Table"],
        (2 + base_filecounts["Table"]) * filemultiplier);
    string tab1file = Path.Combine(tabdir, "Tab1" + suffix);
    string tab2file = Path.Combine(tabdir, "Tab2" + suffix);
    WVPASS(File.Exists(tab1file));
    CheckExportedFileContents(tab1file,
        "!!SCHEMAMATIC 72c64bda7c48a954e63f359ff1fa4e79 " +
        sums["Table/Tab1"].GetSumString() + " ",
        sc.tab1sch);
    WVPASS(File.Exists(tab2file));
    CheckExportedFileContents(tab2file,
        "!!SCHEMAMATIC 69b15b6da6961a0f006fa55106cb243b " +
        sums["Table/Tab2"].GetSumString() + " ",
        sc.tab2sch);

    // Triggers
    WVPASSEQ(Directory.GetDirectories(triggerdir).Length, 0);
    WVPASSEQ(filecounts["Trigger"],
        (1 + base_filecounts["Trigger"]) * filemultiplier);
    string triggerfile = Path.Combine(triggerdir, "Trigger1" + suffix);
    CheckExportedFileContents(triggerfile,
        "!!SCHEMAMATIC eb7556c49140340ff74f06660a55457b 0x5a93c375 ",
        sc.triggerq);

    // Views
    WVPASSEQ(Directory.GetDirectories(viewdir).Length, 0);
    WVPASSEQ(filecounts["View"],
        (1 + base_filecounts["View"]) * filemultiplier);
    string viewfile = Path.Combine(viewdir, "View1" + suffix);
    CheckExportedFileContents(viewfile,
        "!!SCHEMAMATIC b43a8c712d3a274a6842fc2413516665 0xe0af9ccd ",
        sc.viewq);

    // XML Schemas
    WVPASSEQ(Directory.GetDirectories(xmldir).Length, 0);
    WVPASSEQ(filecounts["XMLSchema"],
        (1 + base_filecounts["XMLSchema"]) * filemultiplier);
    string testschemafile = Path.Combine(xmldir, "TestSchema" + suffix);
    WVPASS(File.Exists(testschemafile));
    CheckExportedFileContents(testschemafile,
        "!!SCHEMAMATIC f45c4ea54c268c91f41c7054c8f20bc9 0xf4b2c764 ",
        sc.xmlq);
}
public void TestXmlSchemaChecksums() { SchemaCreator sc = new SchemaCreator(this); WVASSERT(VxExec(sc.xmlq)); VxSchemaChecksums sums; sums = dbus.GetChecksums(); WVPASSEQ(sums["XMLSchema/TestSchema"].checksums.Count(), 1); WVPASSEQ(sums["XMLSchema/TestSchema"].checksums.First(), 4105357156); WVASSERT(VxExec("drop xml schema collection TestSchema")); sc.Cleanup(); }
public void TestTableChecksums() { SchemaCreator sc = new SchemaCreator(this); sc.Create(); VxSchemaChecksums sums; sums = dbus.GetChecksums(); // Three columns, and two indexes each with two columns, gives us // seven checksums WVPASSEQ(sums["Table/Tab1"].checksums.Count(), 7); WVPASSEQ(sums["Table/Tab1"].checksums.ElementAt(0), 0x00B0B636) WVPASSEQ(sums["Table/Tab1"].checksums.ElementAt(1), 0x1D32C7EA) WVPASSEQ(sums["Table/Tab1"].checksums.ElementAt(2), 0x968DBEDC) WVPASSEQ(sums["Table/Tab1"].checksums.ElementAt(3), 0xAB109B86) WVPASSEQ(sums["Table/Tab1"].checksums.ElementAt(4), 0xC1A74EA4) WVPASSEQ(sums["Table/Tab1"].checksums.ElementAt(5), 0xE50EE702) WVPASSEQ(sums["Table/Tab1"].checksums.ElementAt(6), 0xE8634548) WVASSERT(VxExec("drop table Tab1")); sc.Cleanup(); }
public void Initialize(IUuidGenerator generator, OrderedPartCollection<AbstractDocumentCodec> documentCodecs) { if (generator == null) throw new ArgumentNullException("generator"); if (documentCodecs == null) throw new ArgumentNullException("documentCodecs"); uuidGenerator = generator; _documentCodecs = documentCodecs; StorageEnvironmentOptions options = configuration.RunInMemory ? CreateMemoryStorageOptionsFromConfiguration(configuration) : CreateStorageOptionsFromConfiguration(configuration); tableStorage = new TableStorage(options, bufferPool); var schemaCreator = new SchemaCreator(configuration, tableStorage, Output, Log); schemaCreator.CreateSchema(); schemaCreator.SetupDatabaseIdAndSchemaVersion(); schemaCreator.UpdateSchemaIfNecessary(); SetupDatabaseId(); }
public void Initialize(IUuidGenerator generator, OrderedPartCollection<AbstractDocumentCodec> documentCodecs) { if (generator == null) throw new ArgumentNullException("generator"); if (documentCodecs == null) throw new ArgumentNullException("documentCodecs"); uuidGenerator = generator; _documentCodecs = documentCodecs; StorageEnvironmentOptions options = configuration.RunInMemory ? CreateMemoryStorageOptionsFromConfiguration(configuration) : CreateStorageOptionsFromConfiguration(configuration); options.OnScratchBufferSizeChanged += size => { if (configuration.Storage.Voron.ScratchBufferSizeNotificationThreshold < 0) return; if (size < configuration.Storage.Voron.ScratchBufferSizeNotificationThreshold * 1024L * 1024L) return; RunTransactionalStorageNotificationHandlers(); }; tableStorage = new TableStorage(options, bufferPool); var schemaCreator = new SchemaCreator(configuration, tableStorage, Output, Log); schemaCreator.CreateSchema(); schemaCreator.SetupDatabaseIdAndSchemaVersion(); if (!configuration.Storage.PreventSchemaUpdate) schemaCreator.UpdateSchemaIfNecessary(); SetupDatabaseId(); }