/// <summary>
/// Returns the in-memory index for field <paramref name="f"/> of a table,
/// rebuilding it from the table's data pages when it is not already cached
/// in the per-table <see cref="SortedList"/> (keyed by field sequence).
/// </summary>
/// <param name="TableName">Name of the table owning the field.</param>
/// <param name="f">Field whose index is requested; must be indexed.</param>
/// <value>The cached or freshly built <see cref="Index"/>.</value>
Index GetIndex(string TableName, Field f)
{
	lock(this.GetTableLock(TableName))
	{
		// One index cache per table; field seq is the key.
		SortedList tableCache = this.TableBlocking[TableName] as SortedList;
		object cached = tableCache[f.seq];
		if(cached != null)
			return cached as Index;

		if(!f.bIndexed)
			throw new Exception("Not indexed field.");

		Index index = new Index();
		index.bUnique = f.bUnique;

		int tid = (int)TableName2TID[TableName];
		TableNameDef tableDef = TID2Def[tid] as TableNameDef;

		int valSize = (int)f.DataSize();
		int rowsPerPage = (PageSize - ContentPageDataOffset) / valSize;
		ArrayList pages = f.DataFID;
		if((pages.Count * rowsPerPage) < tableDef.rownum)
			throw new Exception("Row num corrupted.");

		if(f.seq == 0)
		{
			// Field seq 0 is the "deleted" marker itself: index every row.
			for(int row = 0; row < tableDef.rownum; row++)
			{
				int page = (int)pages[row / rowsPerPage];
				object val = f.ReadData( this.PageReader(page, ContentPageDataOffset + (row % rowsPerPage) * valSize) );
				index.Add(val, row, f.Name);
			}
		}
		else
		{
			// Any other field: consult the deletion index and skip deleted rows.
			Index deletedIndex = GetIndex(TableName, tableDef.fseq2FieldDef[(long)0] as Field);
			Set deletedRows = deletedIndex.GetRowSet(true);
			for(long row = 0; row < tableDef.rownum; row++)
			{
				if(deletedRows.Contains(row))
					continue;
				int page = (int)pages[(int)(row / rowsPerPage)];
				object val = f.ReadData( this.PageReader(page, ContentPageDataOffset + (int)(row % rowsPerPage) * valSize) );
				index.Add(val, row, f.Name);
			}
		}

		tableCache[f.seq] = index;
		return index;
	}
}
/// <summary>
/// Adds a field to a table, or reconciles an existing field of the same name:
/// relaxes a Unique index to IndexedNotUnique, promotes an unindexed field to
/// IndexedNotUnique, or migrates an Int32 field's storage to Int64. The Int64
/// migration records its progress (STATE 0..3) in <c>tblAlterTbl</c> so an
/// interrupted upgrade can be detected.
/// </summary>
/// <param name="TableName">Table to alter.</param>
/// <param name="f">Desired field definition.</param>
public void AddFieldIfNotExist(string TableName, Field f)
{
	lock(this.TableBlocking)
	{
		Field[] flds = this.GetFields(TableName);

		// The internal "deleted" marker field must exist on every table.
		Field DeletedField = null;
		foreach(Field i in flds)
		{
			if(i.Name == DeletedFieldName)
			{
				DeletedField = i;
				break;
			}
		}
		if(DeletedField == null)
			throw new Exception("Deleted field not found.");

		foreach(Field i in flds)
		{
			if(i.Name != f.Name)
				continue;

			if((f.Indexing == FieldIndexing.IndexedNotUnique) && (i.Indexing == FieldIndexing.Unique))
			{
				// Convert a unique field into a non-unique indexed one: rewrite its
				// definition in place (+1 deleted flag, +1 page type, +4 fid).
				bw.BaseStream.Position = i.PageOfFieldSef*PageSize + 1 + 1 + 4;
				i.bUnique = false;
				i.Write(bw);
				bw.Flush();
				// Drop cached indexes so they are rebuilt with the new uniqueness.
				if(this.TableBlocking[TableName] != null)
					this.TableBlocking.Remove(TableName);
			}
			else if((f.Indexing == FieldIndexing.IndexedNotUnique) && (i.Indexing == FieldIndexing.None))
			{
				// Promote an unindexed field to a non-unique indexed one.
				bw.BaseStream.Position = i.PageOfFieldSef*PageSize + 1 + 1 + 4;
				i.bIndexed = true;
				i.bUnique = false;
				i.Write(bw);
				bw.Flush();
				// This is not strictly necessary but forces index rebuilding.
				if(this.TableBlocking[TableName] != null)
					this.TableBlocking.Remove(TableName);
			}
			else if(i.type == FieldType.ftInt32 && f.type == FieldType.ftInt64)
			{
				// Upgrading Int32 to Int64.
				this.LogToFile("Upgrading Int32 to Int64","Detected on field '"+i.Name+"', table '"+TableName+"'");

				// Create a temporary field that will receive the widened data.
				Field F = new Field();
				F.Indexing = i.Indexing;
				F.Name = i.Name + "$tmp";
				F.type = f.type;
				F.DefaultValue = f.DefaultValue;
				this.Insert(tblAlterTbl,
					new object[,]{{"TNAME",TableName},{"STATE",0},{"FSRC",i.Name},{"FTMP",F.Name}});
				this.LogToFile("Upgrading Int32 to Int64","Temp field");
				AddField(TableName,F);
				this.ForcedInsert(tblAlterTbl,
					new object[,]{{"TNAME",TableName},{"FSRC",i.Name}},
					new object[,]{{"STATE",1}} );

				this.LogToFile("Upgrading Int32 to Int64","Copying data");
				int tid = (int)this.TableName2TID[TableName];
				TableNameDef tnd = this.TID2Def[tid] as TableNameDef;
				long[] data = new long[tnd.rownum];
				{// READ SOURCE DATA (Int32 values, widened into memory)
					int valSize = (int)i.DataSize();
					long Capacity = (PageSize-ContentPageDataOffset)/valSize;
					ArrayList pages = i.DataFID;
					if((pages.Count*Capacity) < tnd.rownum)
						throw new Exception("Row num corrupted.");
					for(long rowid=0L; rowid<tnd.rownum; rowid++)
					{
						long npage = rowid / Capacity;
						long offset = rowid % Capacity;
						int page = (int)pages[(int)npage];
						int SRC = (int)i.ReadData( this.PageReader(page,ContentPageDataOffset+offset*valSize) );
						data[rowid] = (long)SRC;
					}
				}
				{// WRITE DATA INTO THE TEMP FIELD
					int valSize = (int)F.DataSize();
					long Capacity = (PageSize-ContentPageDataOffset)/valSize;
					ArrayList pages = F.DataFID;
					if((pages.Count*Capacity) < tnd.rownum)
						throw new Exception("Row num corrupted.");
					for(long rowid=0L; rowid<tnd.rownum; rowid++)
					{
						long npage = rowid / Capacity;
						long offset = rowid % Capacity;
						int page = (int)pages[(int)npage];
						// FIX: position must be relative to the target data page.
						// The original omitted page*PageSize and wrote the widened
						// values over the beginning of the database file.
						bw.BaseStream.Position = (long)page*PageSize + ContentPageDataOffset + offset*valSize;
						Variant v = new Variant();
						v.obj = data[rowid];
						v.type = F.type;
						F.WriteData(bw,v,true);
						this.InvalidatePage(page);
					}
				}
				// COPY ENDED
				this.ForcedInsert(tblAlterTbl,
					new object[,]{{"TNAME",TableName},{"FSRC",i.Name}},
					new object[,]{{"STATE",2}} );

				this.LogToFile("Upgrading Int32 to Int64","Drop original");
				this.DropField(TableName,i.Name);
				// SOURCE FIELD DROPPED
				this.ForcedInsert(tblAlterTbl,
					new object[,]{{"TNAME",TableName},{"FSRC",i.Name}},
					new object[,]{{"STATE",3}} );

				// RENAME TEMP FIELD back to the original name, rewriting its
				// definition in place (skip deleted flag, page type and tid).
				this.LogToFile("Upgrading Int32 to Int64","Replace original");
				this.fsDB.Position = F.PageOfFieldSef*PageSize;
				this.fsDB.Position += 1; // skip deleted flag
				this.fsDB.Position += 1; // skip page type
				this.fsDB.Position += 4; // skip tid
				F.Name = i.Name;
				F.Write(bw);
				bw.Flush();

				// PROCESS ENDED: remove the migration bookkeeping row.
				this.Delete(tblAlterTbl,
					new object[,]{{"TNAME","=",TableName},{"FSRC","=",i.Name}} );
				// Force index flushing.
				if(this.TableBlocking[TableName] != null)
					this.TableBlocking.Remove(TableName);
			}
			return;
		}

		// No field with that name exists: plain add.
		this.AddField(TableName,f);
	}
}
/// <summary>
/// Adds a new field to an existing table: assigns it the next field sequence,
/// writes its definition page, and — if the table already holds rows —
/// allocates content pages pre-filled with the field's default value.
/// </summary>
/// <param name="TableName">Table to extend.</param>
/// <param name="f">Field definition; its seq, tid, PageOfFieldSef and DataFID
/// are assigned by this method.</param>
public void AddField(string TableName, Field f)
{
	lock(this.TableBlocking) // total blocking required
	{
		QueryCacheDestroy(TableName);
		Field[] flds = GetFields(TableName);
		foreach(Field i in flds)
		{
			if(f.Name == i.Name)
				throw new Exception("Column already present.");
		}
		int tid = (int)this.TableName2TID[TableName];
		TableNameDef tnd = this.TID2Def[tid] as TableNameDef;
		try
		{
			// Auto-increment the table's field sequence and persist it.
			int fseq = tnd.fseq++;
			this.fsDB.Position = tnd.PageOfTableDef * PageSize;
			this.fsDB.Position += 1; // skip deleted flag
			this.fsDB.Position += 1; // skip type
			this.fsDB.Position += 4; // skip fid
			bw.Write( tnd.fseq );
			bw.Flush();

			// Build the field definition page. It is written with the deleted
			// flag set, then activated once fully written.
			int page = this.LockAvaiblePage();
			this.fsDB.Position = page*PageSize;
			bw.Write( (bool)true ); // deleted
			bw.Flush();
			bw.Write( (byte)FieldPageType );
			bw.Write( tid );
			f.seq = fseq;
			f.tid = tid;
			f.Write(bw);
			bw.Flush();
			f.PageOfFieldSef = page;
			f.DataFID = new ArrayList();
			this.fsDB.Position = page*PageSize;
			bw.Write( (bool)false ); // active
			bw.Flush();
			tnd.fseq2FieldDef[f.seq] = f;
			PeekPagesByFID(tid).Add(page);

			// Grow storage if the table already has rows: keep adding content
			// pages (filled with default values) until every row has a slot.
			if(tnd.rownum > 0)
			{
				int valSize = (int)f.DataSize();
				long Capacity = (PageSize-ContentPageDataOffset)/valSize;
				ArrayList pages = f.DataFID;
				while((pages.Count*Capacity) < tnd.rownum)
				{
					int datapage = this.LockAvaiblePage();
					bw.BaseStream.Position = (datapage*PageSize);
					bw.Write( true ); // deleted until fully initialized
					bw.Flush();
					bw.Write( (byte)Database.ContentPageType );
					bw.Write( tnd.TableFID );
					bw.Write( (int)f.seq );
					bw.Write( f.DataFID.Count );
					bw.Flush();
					for(int c=0; c<Capacity; c++)
					{
						bw.BaseStream.Position = (datapage*PageSize)+ContentPageDataOffset+c*valSize;
						f.WriteDefaultData(bw,false);
					}
					bw.Flush();
					bw.BaseStream.Position = (datapage*PageSize);
					bw.Write( (bool)false ); // activate the page
					bw.Flush();
					pages.Add(datapage);
					PeekPagesByFID(tid).Add(datapage);
					this.InvalidatePage(datapage);
				}
			}
		}
		catch(Exception ex)
		{
			this.LogToFile(ex.Message, ex.StackTrace);
			// FIX: keep the original failure as InnerException instead of
			// discarding it — the log file was previously the only trace.
			throw new Exception("Unhandled exception at AddField.", ex);
		}
	}
}