Ejemplo n.º 1
0
		/// <summary>
		/// Opens (or creates) the database file, recovers from a pending crash log if one
		/// exists, scans every page to rebuild the in-memory catalog (tables, fields, data
		/// pages), grows under-sized fields, and ensures the built-in system tables exist.
		/// </summary>
		/// <param name="fname">Database file path; a ".hnd" extension is appended if missing.</param>
		public void Open(string fname)
		{
			lock(this.TableBlocking)
			{
				RaiseExceptionIfOpened();
				// Normalize: strip a trailing ".hnd" then re-append it to the absolute path.
				if(fname.ToLower().EndsWith(".hnd"))
					fname=fname.Substring(0,fname.Length-4);
				DatabaseFilePath=System.IO.Path.GetFullPath(fname)+".hnd";

				// Initial values
				if(!File.Exists(this.DatabaseFilePath))
				{
					try
					{
						fsDB = new FileStream(this.DatabaseFilePath,FileMode.Create,FileAccess.ReadWrite,FileShare.None,8*1024);
					}
					catch
					{
						throw new Exception("Can't create file.");
					}
				}
				else
				{
					try
					{
						// FileShare.None: this process takes exclusive ownership of the file.
						fsDB = new FileStream(this.DatabaseFilePath,FileMode.Open,FileAccess.ReadWrite,FileShare.None,8*1024);
					}
					catch
					{
						throw new Exception("Database in use.");
					}
				}
				// Truncate any trailing partial page so the length is an exact multiple of PageSize.
				long len = (fsDB.Length/PageSize); len*=PageSize;
				if(fsDB.Length>len)
				{
					this.LogToFile("Warning","File size fixed.");
					fsDB.SetLength(len);
				}
				// Reset the in-memory catalog before scanning the file.
				slFID2Pages = new SortedList();
				TableName2TID = new Hashtable();
				TID2Def = new Hashtable();
				pcInit();
				//PagesInUse = new SortedSet();
				DeletedPages = new SortedSet();
				br = new BinaryReader(fsDB,System.Text.Encoding.Unicode);
				bw = new BinaryWriter(fsDB,System.Text.Encoding.Unicode);

				// check log file: the ".hlg" side file holds crash-recovery data; a leading
				// boolean 'true' means the last session did not shut down cleanly.
				if(true)
				{
					string lfn = DatabaseFilePath+".hlg";
					if(File.Exists(lfn))
					{
						FileStream lf = new FileStream(lfn,FileMode.Open,FileAccess.ReadWrite,FileShare.None);
						BinaryReader lfr = new BinaryReader(lf,System.Text.Encoding.Unicode);
						try
						{
							if((lfr.BaseStream.Length>0)&&lfr.ReadBoolean())
							{// recover from last crash
								byte logtype = lfr.ReadByte();
								if(logtype==0)
								{// delete pages op: re-mark the logged page numbers as deleted
									this.LogToFile("Warning","Deleted pages fixed.");
									ArrayList al = new ArrayList();
									int cnt = lfr.ReadInt32();
									for(int n=0;n<cnt;n++)
									{
										al.Add( lfr.ReadInt32() );
									}
									for(int n=0;n<cnt;n++)
									{
										bw.BaseStream.Position=PageSize*( (int)al[n] );
										bw.Write( true ); // deleted
									}
									bw.Flush();
									lf.SetLength(0);
									lf.Flush();
								}
								if(logtype==1)
								{// rollback pages: restore the full page images saved in the log
									this.LogToFile("Warning","Rollback modified pages.");
									int pcount = lfr.ReadInt32(); // num of pages

									for(int p=0;p<pcount;p++)
									{
										int page = lfr.ReadInt32();
										fsDB.Position=PageSize*page;
										byte[] buf = lfr.ReadBytes( Database.PageSize );
										bw.Write( buf );
									}

									bw.Flush();
									lf.SetLength(0);
									lf.Flush();
								}
							}
						}
						catch
						{
							Close();
							throw new Exception("Can't recover from last crash.");
						}
						finally
						{
							lf.Close();
						}
					}
				}
				// pagePurgatory collects page numbers that get marked deleted at the end of Open.
				ArrayList pagePurgatory = new ArrayList();
				Hashtable htFieldsByTID = new Hashtable();// contains Hastables by field seq
				Hashtable htDataByTID = new Hashtable(); // key: tid + fieldseq + dataseq = pageno
				Set ProcessedPages = new SortedSet();
				#region 1st Pass: Scan deleted pages and table pages
				NextFID=-1;
				try
				{
					int pos=0; // page counter
					fsDB.Position=0;
					while(fsDB.Position<fsDB.Length)
					{
						// read the page header
						long ptr = br.BaseStream.Position;
						bool bPageIsDeleted = br.ReadBoolean();
						if(bPageIsDeleted)
						{
							ProcessedPages.Add(pos);
							this.DeletedPages.Add(pos);
						}
						else
						{
							byte bPageType = br.ReadByte();
							int fid = br.ReadInt32();
							if(bPageType==TablePageType)
							{
								// Table definition page: register the table in the catalog.
								ProcessedPages.Add(pos);
								TableNameDef tnd = new TableNameDef(fid,pos);
								tnd.fseq=br.ReadInt32();
								tnd.rownum=br.ReadInt64();
								tnd.tname = br.ReadString();
								TID2Def[fid]=tnd;
								TableName2TID[tnd.tname]=fid;
							}
							else if(bPageType==FieldPageType)
							{// Page is a field def, store it for further processing
								ProcessedPages.Add(pos);
								int tid = fid;
								//TableNameDef tnd = TID2Def[tid] as TableNameDef;
								Field fld = new Field();
								fld.Read(br);// 4-field
								fld.tid=tid;
								fld.PageOfFieldSef=pos;

								if(!htFieldsByTID.ContainsKey(tid))
									htFieldsByTID[tid]=new Hashtable();
								Hashtable htFieldsBySeq = htFieldsByTID[tid] as Hashtable;

								// avoid repeated fields
								bool bAvoid=false;
								foreach(Field f in htFieldsBySeq.Values)
								{
									if(f.Name==fld.Name)
									{
										bAvoid=true;
										break;
									}
								}
								if(!bAvoid)
								{
									htFieldsBySeq[fld.seq]=fld;
									//tnd.fseq2FieldDef[fld.seq]=fld;
								}
								else
								{
									// Duplicate field definition: schedule this page for deletion.
									pagePurgatory.Add(pos);
								}
							}
							else if(bPageType==ContentPageType)
							{
								// Content page: remember its page number keyed by (tid, field seq, page order).
								int tid = fid;
								if(!htDataByTID.ContainsKey(tid))
									htDataByTID[tid]=new Hashtable();
								Hashtable htDataByFSeq = htDataByTID[tid] as Hashtable;

								long fseq = br.ReadInt32(); // 4º seq of field
								if(!htDataByFSeq.ContainsKey(fseq))
									htDataByFSeq[fseq]=new ArrayList();
								ArrayList alDataByOrder = htDataByFSeq[fseq] as ArrayList;

								int seq = br.ReadInt32(); // 5º data page order
								while(alDataByOrder.Count<=seq)
									alDataByOrder.Add(-1); // -1 marks a slot whose page was not seen yet
								alDataByOrder[seq]=pos;
							}
							NextFID = Math.Max( NextFID, fid );
							PeekPagesByFID(fid).Add(pos);
						}
						fsDB.Position = Database.PageSize + ptr;
						pos++;
					}
					NextFID++;
				}
				catch(Exception ex)
				{
					this.LogToFile(ex.Message,ex.StackTrace);
					this.Close();
					throw new Exception("Database corrupted.");
				}
				#endregion
				#region 2nd Pass: Field integration
				// Attach the field definitions collected in pass 1 to their owning tables.
//				try
//				{
					foreach(int tid in htFieldsByTID.Keys)
					{
						TableNameDef tnd = TID2Def[tid] as TableNameDef;
						Hashtable htFieldsBySeq = htFieldsByTID[tid] as Hashtable;
						foreach(long seq in htFieldsBySeq.Keys)
						{
							tnd.fseq2FieldDef[seq]=htFieldsBySeq[seq];
						}
					}
//
//					int pos=0; // page counter
//					fsDB.Position=0;
//					while(fsDB.Position<fsDB.Length)
//					{	
//						// leemos info de página
//						long ptr = br.BaseStream.Position;
//						if(!ProcessedPages.Contains(pos))
//						{
//							bool bPageIsDeleted = br.ReadBoolean();// 1-deleted
//							if(bPageIsDeleted)
//							{
//								// skip
//							}
//							else
//							{
//								byte bPageType = br.ReadByte();// 2-type
//								int tid = br.ReadInt32(); // 3-fid of table
//								if(bPageType==FieldPageType)
//								{
//									ProcessedPages.Add(pos);
//									TableNameDef tnd = TID2Def[tid] as TableNameDef;
//									Field fld = new Field();
//									fld.Read(br);// 4-field
//									fld.tid=tid;
//									fld.PageOfFieldSef=pos;
//
//									// avoid repeated fields
//									bool bAvoid=false;
//									foreach(Field f in tnd.fseq2FieldDef.Values)
//									{
//										if(f.Name==fld.Name)
//										{
//											bAvoid=true;
//											break;
//										}
//									}
//									if(!bAvoid)
//									{
//										tnd.fseq2FieldDef[fld.seq]=fld;
//									}
//									else
//									{
//										pagePurgatory.Add(pos);
//									}
//								}
//							}
//						}
//						fsDB.Position = Database.PageSize + ptr;
//						pos++;
//					}
//				}
//				catch(Exception ex)
//				{
//					this.LogToFile(ex.Message,ex.StackTrace);
//					this.Close();
//					throw new Exception("Database corrupted.");
//				}
				#endregion
				#region 3nd Pass: Locate data for fields
				// Attach the content-page lists collected in pass 1 to their field definitions;
				// data belonging to an unknown field seq is sent to the purgatory for deletion.
				try
				{
					foreach(int tid in htDataByTID.Keys)
					{
						TableNameDef tnd = TID2Def[tid] as TableNameDef;
						Hashtable htDataByFSeq = htDataByTID[tid] as Hashtable;
						foreach(long seq in htDataByFSeq.Keys)
						{
							ArrayList alDataByOrder = htDataByFSeq[seq] as ArrayList;
							if(!tnd.fseq2FieldDef.ContainsKey(seq))
							{
								pagePurgatory.AddRange( alDataByOrder );
							}
							else
							{
								Field fld = tnd.fseq2FieldDef[seq] as Field;
								fld.DataFID=alDataByOrder;
							}
						}
					}
//					int pos=0; // page counter
//					fsDB.Position=0;
//					while(fsDB.Position<fsDB.Length)
//					{	
//						// leemos info de página
//						long ptr = br.BaseStream.Position;
//						if(!ProcessedPages.Contains(pos))
//						{
//							bool bPageIsDeleted = br.ReadBoolean();// 1º deleted is on?
//							if(bPageIsDeleted)
//							{
//								// skip
//							}
//							else
//							{
//								byte bPageType = br.ReadByte();// 2º Type
//								int tid = br.ReadInt32();// 3º fid of table
//								if(bPageType==ContentPageType)
//								{ 
//									long fseq = br.ReadInt32(); // 4º seq of field
//									int seq = br.ReadInt32(); // 5º data page order
//									TableNameDef tnd = TID2Def[tid] as TableNameDef;
//									if(!tnd.fseq2FieldDef.ContainsKey(fseq))
//									{
//										pagePurgatory.Add(pos);
//									}
//									Field fld = tnd.fseq2FieldDef[fseq] as Field;
//									while(fld.DataFID.Count<=seq)
//										fld.DataFID.Add(-1);
//									fld.DataFID[seq]=pos;
//								}
//							}
//						}
//						fsDB.Position = Database.PageSize + ptr;
//						pos++;
//					}
					// Any remaining -1 slot means a content page is missing from the file.
					foreach(TableNameDef tnd in TID2Def.Values)
						foreach(Field f in tnd.fseq2FieldDef.Values)
							foreach(int page in f.DataFID)
								if(page==-1)
									throw new Exception("Database corrupted.");
				}
				catch(Exception ex)
				{
					this.LogToFile(ex.Message,ex.StackTrace);
					this.Close();
					throw new Exception("Database corrupted.");
				}
				#endregion
				// Allocate and initialize extra content pages for fields whose page list
				// cannot hold rownum values yet (e.g. a field added to a populated table).
				foreach(TableNameDef tnd in TID2Def.Values)
					foreach(Field f in tnd.fseq2FieldDef.Values)
					{
						// grow if it is needed
						if(tnd.rownum>0)
						{
							int valSize = (int)f.DataSize();
							long Capacity = (PageSize-ContentPageDataOffset)/valSize;
							ArrayList pages = f.DataFID;
							while((pages.Count*Capacity)<tnd.rownum)
							{
								int datapage = this.LockAvaiblePage();
								bw.BaseStream.Position = (datapage*PageSize);
								bw.Write( true ); // marked deleted while the page is being initialized
								bw.Flush();
								bw.Write( (byte)Database.ContentPageType );
								bw.Write( tnd.TableFID );
								bw.Write( (int)f.seq );
								bw.Write( f.DataFID.Count );
								bw.Flush();
								for(int c=0;c<Capacity;c++)
								{
									bw.BaseStream.Position = (datapage*PageSize)+ContentPageDataOffset+c*valSize;
									f.WriteDefaultData(bw,false);
								}
								bw.Flush();
								bw.BaseStream.Position = (datapage*PageSize);
								bw.Write( (bool)false ); // initialization done: clear the deleted flag
								bw.Flush();
								pages.Add(datapage);
								PeekPagesByFID(tnd.TableFID).Add(datapage);
								this.InvalidatePage(datapage);
							}
						}
					}


				// Autoseq table
				this.AddTableIfNotExist(tblSequences);
				this.AddFieldIfNotExist(tblSequences, new Field("SEQNAME","",FieldIndexing.Unique,40));
				this.AddFieldIfNotExist(tblSequences, new Field("SEQVALUE",(long)0,FieldIndexing.None));
				this.AddFieldIfNotExist(tblSequences, new Field("SEQINCREMENT",(long)1,FieldIndexing.None));
				this.AddFieldIfNotExist(tblSequences, new Field("SEQLOOP",false,FieldIndexing.None));
				this.AddFieldIfNotExist(tblSequences, new Field("SEQMAXVALUE",long.MaxValue,FieldIndexing.None));

				// Alter-table state table (tracks in-progress field migrations)
				this.AddTableIfNotExist(tblAlterTbl);
				this.AddFieldIfNotExist(tblAlterTbl, new Field("TNAME","",FieldIndexing.None,80));
				this.AddFieldIfNotExist(tblAlterTbl, new Field("FSRC","",FieldIndexing.None,80));
				this.AddFieldIfNotExist(tblAlterTbl, new Field("FTMP","",FieldIndexing.None,80));
				this.AddFieldIfNotExist(tblAlterTbl, new Field("STATE",(int)1,FieldIndexing.None));

				// Unknown bugfix -> Purge pages: mark every purgatory page as deleted on disk.
				foreach(int i in pagePurgatory)
				{
					if(i==-1) continue;
					bw.BaseStream.Position = (i*PageSize);
					bw.Write( true );
					bw.Flush();
				}
			}
			GC.Collect();
			GC.WaitForPendingFinalizers();
		}
Ejemplo n.º 2
0
		/// <summary>
		/// Adds a field if it not exists. When a field with the same name already exists,
		/// this may instead upgrade it in place: Unique/None indexing is converted to
		/// IndexedNotUnique, and an Int32 field is migrated to Int64 via a temporary
		/// field (progress is journaled in the alter-table so a crash can be detected).
		/// </summary>
		/// <param name="TableName">Name of the table to alter.</param>
		/// <param name="f">Desired field definition.</param>
		public void AddFieldIfNotExist(string TableName, Field f)
		{
			lock(this.TableBlocking)
			{
				Field[] flds = this.GetFields(TableName);
				// Every table must carry the internal "deleted" marker field.
				Field DeletedField=null;
				foreach(Field i in flds)
				{
					if(i.Name==DeletedFieldName)
					{
						DeletedField=i;
						break;
					}
				}
				if(DeletedField==null) 
					throw new Exception("Deleted field not found.");
				foreach(Field i in flds)
				{
					if(i.Name==f.Name)
					{
						if((f.Indexing==FieldIndexing.IndexedNotUnique) && (i.Indexing==FieldIndexing.Unique))
						{
							// convert a field into non unique
							// Offset skips the page header: 1 deleted flag + 1 page type + 4 table fid.
							bw.BaseStream.Position = i.PageOfFieldSef*PageSize+1+1+4;
							i.bUnique=false;
							i.Write(bw);
							bw.Flush();
							// Drop any cached index so it is rebuilt with the new uniqueness.
							if(this.TableBlocking[TableName]!=null)
								this.TableBlocking.Remove(TableName);
						}
						else if((f.Indexing==FieldIndexing.IndexedNotUnique) && (i.Indexing==FieldIndexing.None))
						{
							// convert a field into non unique
							bw.BaseStream.Position = i.PageOfFieldSef*PageSize+1+1+4;
							i.bIndexed=true;
							i.bUnique=false;
							i.Write(bw);
							bw.Flush();

							// This is not necessary but...
							if(this.TableBlocking[TableName]!=null)
								this.TableBlocking.Remove(TableName);
						}
						else if(i.type==FieldType.ftInt32 && f.type==FieldType.ftInt64)
						{// Upgrading Int32 to Int64
							this.LogToFile("Upgrading Int32 to Int64","Detected on field '"+i.Name+"', table '"+TableName+"'");
							// Add a temporary field with the target type.
							Field F = new Field();
							F.Indexing=i.Indexing;
							F.Name=i.Name+"$tmp";
							F.type=f.type;
							F.DefaultValue=f.DefaultValue;

							// STATE 0: migration registered, temp field not created yet.
							this.Insert(tblAlterTbl,
								new object[,]{{"TNAME",TableName},{"STATE",0},{"FSRC",i.Name},{"FTMP",F.Name}});

							this.LogToFile("Upgrading Int32 to Int64","Temp field");
							AddField(TableName,F);

							// STATE 1: temp field exists; data copy about to start.
							this.ForcedInsert(tblAlterTbl,
								new object[,]{{"TNAME",TableName},{"FSRC",i.Name}},
								new object[,]{{"STATE",1}}
								);

							this.LogToFile("Upgrading Int32 to Int64","Copying data");
							int tid = (int)this.TableName2TID[TableName];
							TableNameDef tnd = this.TID2Def[tid] as TableNameDef;
							long[] data = new long[tnd.rownum];
							if(true)
							{// READ SOURCE DATA
								int valSize = (int)i.DataSize();
								long Capacity = (PageSize-ContentPageDataOffset)/valSize;
								ArrayList pages = i.DataFID;
								if((pages.Count*Capacity)<tnd.rownum)
									throw new Exception("Row num corrupted.");

								for(long rowid=0L;rowid<tnd.rownum;rowid++)
								{								
									long npage = rowid / Capacity;
									long offset = rowid % Capacity;
									int page = (int)pages[(int)npage];
									int SRC = (int)i.ReadData( this.PageReader(page,ContentPageDataOffset+offset*valSize) );
									data[rowid]=(long)SRC;
								}
							}
							if(true)
							{// WRITE SOURCE DATA
								int valSize = (int)F.DataSize();
								long Capacity = (PageSize-ContentPageDataOffset)/valSize;
								ArrayList pages = F.DataFID;
								if((pages.Count*Capacity)<tnd.rownum)
									throw new Exception("Row num corrupted.");

								for(long rowid=0L;rowid<tnd.rownum;rowid++)
								{								
									long npage = rowid / Capacity;
									long offset = rowid % Capacity;
									int page = (int)pages[(int)npage];
									// BUGFIX: seek inside the target content page. The previous code
									// omitted the (page*PageSize) base, writing every value into page 0.
									bw.BaseStream.Position=(page*PageSize)+ContentPageDataOffset+offset*valSize;
									Variant v = new Variant();
									v.obj=data[rowid];
									v.type=F.type;
									F.WriteData(bw,v,true);
									this.InvalidatePage(page);
								}
							}

							// COPY ENDED (STATE 2)
							this.ForcedInsert(tblAlterTbl,
								new object[,]{{"TNAME",TableName},{"FSRC",i.Name}},
								new object[,]{{"STATE",2}}
								);

							this.LogToFile("Upgrading Int32 to Int64","Drop original");
							this.DropField(TableName,i.Name);

							// SOURCE FIELD DROPPED (STATE 3)
							this.ForcedInsert(tblAlterTbl,
								new object[,]{{"TNAME",TableName},{"FSRC",i.Name}},
								new object[,]{{"STATE",3}}
								);

							// RENAME FIELD: rewrite the temp field's definition page in place,
							// skipping the page header (1 deleted flag + 1 page type + 4 tid).
							this.LogToFile("Upgrading Int32 to Int64","Replace original");
							//int page = this.LockAvaiblePage();
							this.fsDB.Position = F.PageOfFieldSef*PageSize;
							this.fsDB.Position += 1; //bw.Write( (bool)true ); // deleted
							//bw.Flush();
							this.fsDB.Position += 1; //bw.Write( (byte)FieldPageType ); 
							this.fsDB.Position += 4; //bw.Write( tid );
							F.Name=i.Name;
							//f.seq=fseq;
							//f.tid=tid;
							F.Write(bw);
							bw.Flush();
							//f.PageOfFieldSef=page;

							// PROCESS ENDED: remove the migration journal row.
							this.Delete(tblAlterTbl,
								new object[,]{{"TNAME","=",TableName},{"FSRC","=",i.Name}}
								);						

							// Force indexing flushing
							if(this.TableBlocking[TableName]!=null)
								this.TableBlocking.Remove(TableName);
						}
						return;
					}
					
				}
				// Field not present at all: create it normally.
				this.AddField(TableName,f);
			}
		}
Ejemplo n.º 3
0
		/// <summary>
		/// Imports from R1 to an R2 empty database.
		/// </summary>
		/// <param name="path">Path of the source R1 database.</param>
		/// <returns>True on success; false when this database is not empty.</returns>
		public bool ImportFromR1( string path )
		{
			// Destination bd (this) must have only one table (seqs)
			string[] destTables;
			this.GetTableNames(out destTables);
			if(destTables.Length!=1) return false;

			HyperNetDatabase.R1.Database source = new HyperNetDatabase.R1.Database(path);
			string[] sourceTables;
			source.GetTableNames(out sourceTables);

			foreach(string tableName in sourceTables)
			{
				this.AddTableIfNotExist(tableName);

				// Recreate every field, converting each R1 definition into an R2 one.
				HyperNetDatabase.R1.Field[] oldFields = source.GetFields(tableName);
				foreach(HyperNetDatabase.R1.Field oldField in oldFields)
				{
					HyperNetDatabase.R2.Field converted = new Field();
					converted.DefaultValue=Variant.Object2Variant(oldField.DefaultValue.obj);
					converted.len=oldField.len;
					converted.Name=oldField.Name;
					converted.type= (FieldType)((int)oldField.type);
					converted.bIndexed=oldField.bIndexed;
					converted.bUnique=oldField.bUnique;
					this.AddFieldIfNotExist(tableName,converted);
				}

				// Copy every row: build a [name,value] matrix and insert it.
				DataTable sourceRows = source.Select(null, tableName, null);
				foreach(DataRow row in sourceRows.Rows)
				{
					object[] cells = row.ItemArray;
					object[,] nameAndValues = new object[sourceRows.Columns.Count,2];
					for(int col=0;col<cells.Length;col++)
					{
						nameAndValues[col,0] = sourceRows.Columns[col].ColumnName;
						nameAndValues[col,1] = cells[col];
					}
					this.Insert(tableName,nameAndValues);
				}
			}
			return true;
		}
Ejemplo n.º 4
0
		/// <summary>
		/// Reads or regenerates an index for a field in a table
		/// </summary>
		/// <param name="TableName">Table owning the field.</param>
		/// <param name="f">Field to index; must have bIndexed set.</param>
		/// <value>The cached or freshly built Index for the field.</value>
		Index GetIndex(string TableName, Field f)
		{

				lock(this.GetTableLock(TableName))
				{
					// Return the cached index if one was already built for this field seq.
					if((this.TableBlocking[TableName] as SortedList)[f.seq]!=null)
						return (this.TableBlocking[TableName] as SortedList)[f.seq] as Index;
	
					// unique key
					if(!f.bIndexed) 
						throw new Exception("Not indexed field.");

					Index ndx = new Index();
					ndx.bUnique=f.bUnique;

					// Capacity = values that fit in one content page for this field's value size.
					int tid = (int)TableName2TID[TableName];
					TableNameDef tnd = TID2Def[tid] as TableNameDef;
					int valSize = (int)f.DataSize();
					int Capacity = (PageSize-ContentPageDataOffset)/valSize;
					ArrayList pages = f.DataFID;
					if((pages.Count*Capacity)<tnd.rownum)
						throw new Exception("Row num corrupted.");
					if(f.seq==0)
					{
						// seq 0 is the internal "deleted" field: index every row.
						for(int row=0;row<tnd.rownum;row++)
						{
							int npage = row / Capacity;
							int offset = row % Capacity;
							int page = (int)pages[npage];
							//br.BaseStream.Position = (page*PageSize)+ContentPageDataOffset+offset*valSize;
							//object val = f.ReadData(br);
							object val = f.ReadData( this.PageReader(page,ContentPageDataOffset+offset*valSize) );
							ndx.Add(val,row,f.Name);
						}
					}
					else
					{// exclude deleted
						// Recursively fetch the deleted-rows index (field seq 0) and skip those rows.
						Index ndxDeleted = GetIndex(TableName,tnd.fseq2FieldDef[(long)0] as Field);
						Set dset = ndxDeleted.GetRowSet(true);
						for(long row=0;row<tnd.rownum;row++)
						{
							if(!dset.Contains(row))
							{
								int npage = (int)(row / Capacity);
								int offset = (int)( row % Capacity);
								int page = (int)pages[npage];
								//br.BaseStream.Position = (page*PageSize)+ContentPageDataOffset+offset*valSize;
								//object val = f.ReadData(br);
								object val = f.ReadData( this.PageReader(page,ContentPageDataOffset+offset*valSize) );
								ndx.Add(val,row,f.Name);
							}
						}
					}
					// Cache the built index for subsequent calls.
					(this.TableBlocking[TableName] as SortedList)[f.seq]=ndx;
					return ndx;
				}

		}
Ejemplo n.º 5
0
		/// <summary>
		/// Returns the field definitions of the given table as a fresh array.
		/// </summary>
		/// <param name="Name">Table name to look up.</param>
		internal Field[] GetFields(string Name)
		{
			lock(this.TableBlocking)
			{
				// The table must be registered in the name -> TID map.
				if(!TableName2TID.Contains(Name))
					throw new Exception("Table not present.");

				int tableId = (int)TableName2TID[Name];
				TableNameDef def = this.TID2Def[tableId] as TableNameDef;

				// Snapshot the field definitions so callers cannot mutate internal state.
				Field[] result = new Field[def.fseq2FieldDef.Values.Count];
				def.fseq2FieldDef.Values.CopyTo(result,0);
				return result;
			}
		}
Ejemplo n.º 6
0
		/// <summary>
		/// Adds a field
		/// </summary>
		/// <param name="TableName">Table to receive the field; must already exist.</param>
		/// <param name="f">Field definition to add; its seq/tid/page are assigned here.</param>
		public void AddField(string TableName, Field f)
		{	
			lock(this.TableBlocking)// Total blocking required
			{

				string tbl = TableName;
				QueryCacheDestroy(TableName);
				Field[] flds = GetFields(TableName);
				foreach(Field i in flds)
				{
					if(f.Name==i.Name)
						throw new Exception("Column already present.");
				}
				//FieldsCache[TableName]=null; // cancel Field Cache
				int tid = (int)this.TableName2TID[TableName];
				TableNameDef tnd = this.TID2Def[tid] as TableNameDef;
				try
				{
					// auto increment field seq and persist the new counter in the table page
					// (skipping the page header: 1 deleted flag + 1 page type + 4 fid).
					int fseq = tnd.fseq++;
					this.fsDB.Position = tnd.PageOfTableDef * PageSize;
					this.fsDB.Position += 1; // skip delete
					this.fsDB.Position += 1; // skip type
					this.fsDB.Position += 4; // skip fid
					bw.Write( tnd.fseq );
					bw.Flush();

					// build page: written with the deleted flag set first, cleared only after
					// the definition is complete, so a crash mid-write leaves a dead page.
					int page = this.LockAvaiblePage();
					this.fsDB.Position = page*PageSize;
					bw.Write( (bool)true ); // deleted
					bw.Flush();
					bw.Write( (byte)FieldPageType ); 
					bw.Write( tid );
					f.seq=fseq;
					f.tid=tid;
					f.Write(bw);
					bw.Flush();
					f.PageOfFieldSef=page;
					f.DataFID = new ArrayList();
					this.fsDB.Position = page*PageSize;
					bw.Write( (bool)false ); // active
					bw.Flush();
					tnd.fseq2FieldDef[f.seq]=f;
					PeekPagesByFID(tid).Add(page);

					// grow if it is needed: allocate enough content pages, pre-filled with the
					// field's default value, to cover every existing row of the table.
					if(tnd.rownum>0)
					{
						int valSize = (int)f.DataSize();
						long Capacity = (PageSize-ContentPageDataOffset)/valSize;
						ArrayList pages = f.DataFID;
						while((pages.Count*Capacity)<tnd.rownum)
						{
							int datapage = this.LockAvaiblePage();
							bw.BaseStream.Position = (datapage*PageSize);
							bw.Write( true ); // marked deleted until initialization completes
							bw.Flush();
							bw.Write( (byte)Database.ContentPageType );
							bw.Write( tnd.TableFID );
							bw.Write( (int)f.seq );
							bw.Write( f.DataFID.Count );
							bw.Flush();
							for(int c=0;c<Capacity;c++)
							{
								bw.BaseStream.Position = (datapage*PageSize)+ContentPageDataOffset+c*valSize;
								f.WriteDefaultData(bw,false);
							}
							bw.Flush();
							bw.BaseStream.Position = (datapage*PageSize);
							bw.Write( (bool)false );
							bw.Flush();
							pages.Add(datapage);
							PeekPagesByFID(tid).Add(datapage);
							this.InvalidatePage(datapage);
						}
					}
				}
				catch(Exception ex)
				{
					this.LogToFile(ex.Message,ex.StackTrace);
					throw new Exception("Unhandled exception at AddField.");
				}
			}
			
		}
Ejemplo n.º 7
0
		/// <summary>
		/// Writes a one-line, human-readable description of a single page to the writer.
		/// </summary>
		/// <param name="sw">Destination writer.</param>
		/// <param name="page">Page number to dump.</param>
		/// <param name="ident">Number of leading spaces for indentation.</param>
		void DumpPage(StreamWriter sw,int page,int ident)
		{
			// Seek to the page and read the common header (read even for deleted pages).
			fsDB.Position = page*PageSize;
			bool isDeleted = br.ReadBoolean();
			byte pageType = br.ReadByte();
			int tableFid = br.ReadInt32();

			// Indentation prefix plus the fields common to every page.
			sw.Write( new string(' ',ident) );
			sw.Write( "Page num: "+page.ToString("N4")+" " );
			sw.Write( "Deleted: "+isDeleted.ToString()+" " );

			if(!isDeleted)
			{
				sw.Write( "TID: "+tableFid.ToString()+" " );
				if(pageType==Database.TablePageType)
				{
					// Table definition page: field seq counter, row count and table name.
					sw.Write( "Type: Table   " );
					sw.Write( "fseq: "+br.ReadInt32().ToString("G04")+" " );
					sw.Write( "rownum: "+br.ReadInt64().ToString()+" " );
					sw.Write( "tname: "+br.ReadString().ToString()+" " );
				}
				else if(pageType==Database.FieldPageType)
				{
					// Field definition page: deserialize the field and print its identity.
					sw.Write( "Type: Field   " );
					Field fieldDef = new Field();
					fieldDef.Read(br);
					sw.Write( "fseq: "+fieldDef.seq.ToString()+" " );
					sw.Write( "name: "+fieldDef.Name.ToString()+" " );
					sw.Write( "type: "+fieldDef.type.ToString()+" " );
				}
				else if(pageType==Database.ContentPageType)
				{
					// Content page: owning field seq and the page's order within the field.
					sw.Write( "Type: Content " );
					sw.Write( "fseq: "+br.ReadInt32().ToString("G04")+" " );
					sw.Write( "PageOrder: "+br.ReadInt32().ToString("G04")+" " );
				}
			}
			sw.WriteLine("");
		}
Ejemplo n.º 8
0
		/// <summary>
		/// Adds a table
		/// </summary>
		/// <param name="Name">Name of the new table; must not already exist.</param>
		public void AddTable(string Name)
		{
			lock(this.TableBlocking)
			{
				this.RaiseExceptionIfClosed();
				if(TableName2TID.Contains(Name))
					throw new Exception("Table already present.");

				int TablePage = LockAvaiblePage();

				TableNameDef tnd=null;
				try
				{
					// Write the table page with the deleted flag set first; it is cleared
					// only after the full definition is on disk, so a crash mid-write
					// leaves a dead page instead of a corrupt table.
					this.fsDB.Position = TablePage * PageSize;
					bw.Write( (bool)true ); // deleted
					bw.Flush();
					bw.Write( (byte)TablePageType ); // pagetype
					int tid = this.NextFID++;
					bw.Write( (int)tid ); // tid
	
					// Serialize the initial definition: field seq counter, row count, name.
					tnd = new TableNameDef(tid,TablePage);
					tnd.fseq=0;
					bw.Write( tnd.fseq );
					tnd.rownum=0;
					bw.Write( tnd.rownum );
					tnd.tname=Name;
					bw.Write( tnd.tname );
					bw.Flush();

					// Definition complete: clear the deleted flag and register in the catalog.
					this.fsDB.Position = TablePage * PageSize;
					bw.Write( (bool)false ); // non-deleted
					bw.Flush();
					TID2Def[tid]=tnd;
					TableName2TID[tnd.tname]=tid;
					PeekPagesByFID(tid).Add(TablePage);
				}
				catch(Exception ex)
				{
					this.LogToFile(ex.Message,ex.StackTrace);
					throw new Exception("Write error.");
				}
				// Every table gets the internal "deleted" marker field as field seq 0.
				Field f = new Field(DeletedFieldName,true,FieldIndexing.IndexedNotUnique);
				AddField(tnd.tname,f);
			}
		}