public void OpenIndexes(IndexInfo[] info)
{
  IxInfo = info;
  long curIndex = -1;
  var dt = new G.List<DataType>();
  var cm = new G.List<int>();
  // Entries with the same IndexId are assumed to be consecutive; the loop deliberately runs
  // one step past the end of info so that the final group of columns is also flushed.
  for (int i = 0; i <= info.Length; i += 1)
  {
    if (i > 0 && (i == info.Length || info[i].IndexId != curIndex))
    {
      // A complete group of columns has been collected: open the corresponding index.
      IndexFileInfo ci = new IndexFileInfo();
      dt.Add(DataType.Bigint); // For id value
      ci.KeyCount = dt.Count;
      ci.Types = dt.ToArray();
      ci.BaseIx = cm.ToArray();
      ci.IndexId = curIndex;
      OpenIndex(curIndex, ci);
      dt = new G.List<DataType>();
      cm = new G.List<int>();
    }
    if (i != info.Length)
    {
      curIndex = info[i].IndexId;
      int colIx = info[i].ColIx;
      dt.Add(CI.Type[colIx]);
      cm.Add(colIx);
    }
  }
}
FormFile[] ParseMultipart(System.Net.HttpListenerRequest rq)
{
  /* Typical multipart body would be:
   * ------WebKitFormBoundaryVXXOTFUWdfGpOcFK
   * Content-Disposition: form-data; name="f1"; filename="test.txt"
   * Content-Type: text/plain
   *
   * Hello there
   *
   * ------WebKitFormBoundaryVXXOTFUWdfGpOcFK
   * Content-Disposition: form-data; name="submit"
   *
   * Upload
   * ------WebKitFormBoundaryVXXOTFUWdfGpOcFK--
   */
  var flist = new G.List<FormFile>();
  byte[] data = ToByteArray(rq.InputStream);
  System.Text.Encoding encoding = System.Text.Encoding.UTF8; // Not entirely clear what encoding should be used for headers.
  int pos = 0; /* Index into data */
  while (true)
  {
    // Locate the blank line that ends the header block.
    int headerLength = IndexOf(data, encoding.GetBytes("\r\n\r\n"), pos) - pos + 4;
    if (headerLength < 4) { break; }
    string headers = encoding.GetString(data, pos, headerLength);
    pos += headerLength;

    // The first header line is the delimiter
    string delimiter = headers.Substring(0, headers.IndexOf("\r\n"));

    // Extract attributes from the headers
    string contentType = Look(@"(?<=Content\-Type:)(.*?)(?=\r\n)", headers);
    string name = Look(@"(?<= name\=\"")(.*?)(?=\"")", headers);
    string filename = Look(@"(?<=filename\=\"")(.*?)(?=\"")", headers);

    // Get the content length
    byte[] delimiterBytes = encoding.GetBytes("\r\n" + delimiter);
    int contentLength = IndexOf(data, delimiterBytes, pos) - pos;
    if (contentLength < 0) { break; }

    // Extract the content from data
    byte[] content = new byte[contentLength];
    System.Buffer.BlockCopy(data, pos, content, 0, contentLength);
    pos += contentLength + delimiterBytes.Length;
    flist.Add(new FormFile(name, contentType, filename, content));
  }
  return flist.ToArray();
}
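// ParseMultipart relies on three helpers that are not part of this extract: ToByteArray, IndexOf and Look.
// The sketch below is only a guess at minimal implementations that match how they are called above
// ( buffer the stream, naive byte search returning -1 on failure, first regex match or "" );
// it is not the actual code from the project.
static byte[] ToByteArray( System.IO.Stream s )
{
  // Buffer the whole request body in memory.
  var ms = new System.IO.MemoryStream();
  s.CopyTo( ms );
  return ms.ToArray();
}

static int IndexOf( byte[] data, byte[] pattern, int start )
{
  // Naive byte-pattern search; returns -1 when the pattern is not found, which makes the
  // headerLength / contentLength checks above go negative and terminate the loop.
  for ( int i = start; i + pattern.Length <= data.Length; i += 1 )
  {
    int j = 0;
    while ( j < pattern.Length && data[ i + j ] == pattern[ j ] ) j += 1;
    if ( j == pattern.Length ) return i;
  }
  return -1;
}

static string Look( string pattern, string input )
{
  // First regex match, or an empty string when the header attribute is absent.
  var m = System.Text.RegularExpressions.Regex.Match( input, pattern );
  return m.Success ? m.Value : "";
}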
public StdExp(G.List<Exp> arg, DataType type, DataType [] types, Exec e)
{
  Arg = arg.ToArray();
  Type = type;
  Types = types;
  if (Arg.Length != Types.Length) { e.Error(this + " takes " + Types.Length + " argument(s)"); }
}
public override void Convert( DataType [] types, Exec e )
{
  for ( int i = 0; i < types.Length; i += 1 )
  {
    DataType assignType = types[ i ];
    Exp conv = Exps[ i ].Convert( assignType );
    if ( conv == null ) e.Error( "Assign data type error" );
    Exps[ i ] = conv;
  }
  Dvs = Util.GetDVList( Exps.ToArray() ); // Not very elegant that this operation is done twice.
}
public void AlterTable( string schemaName, string tableName, G.List<AlterAction> alist, Exec e )
{
  Table t = (Table) GetTable( schemaName, tableName, e );

  // names and types describe the column layout after the alterations; map is passed to AlterData below.
  var names = new G.List<string>( t.Cols.Names );
  var types = new G.List<DataType>( t.Cols.Types );
  var map = new G.List<int>();
  for ( int i = 0; i < names.Count; i += 1 ) map.Add( i );

  foreach ( AlterAction aa in alist )
  {
    int ix = names.IndexOf( aa.Name );
    if ( aa.Operation != Action.Add && ix == -1 ) e.Error( "Column " + aa.Name + " not found" );

    switch ( aa.Operation )
    {
      case Action.Add:
        if ( ix != -1 ) e.Error( "Column " + aa.Name + " already exists" );
        names.Add( aa.Name );
        types.Add( aa.Type );
        map.Add( 0 );
        Sql( "INSERT INTO sys.Column( Table, Name, Type ) VALUES ( " + t.TableId + "," + Util.Quote(aa.Name) + "," + (int)aa.Type + ")" );
        break;

      case Action.Drop:
        names.RemoveAt( ix );
        types.RemoveAt( ix );
        map.RemoveAt( ix );
        Sql( "DELETE FROM sys.Column WHERE Table = " + t.TableId + " AND Name = " + Util.Quote(aa.Name) );
        Sql( "EXEC sys.DroppedColumn(" + t.TableId + "," + ix + ")" );
        break;

      case Action.ColumnRename:
        names.RemoveAt( ix );
        names.Insert( ix, aa.NewName );
        Sql( "UPDATE sys.Column SET Name = " + Util.Quote(aa.NewName) + " WHERE Table=" + t.TableId + " AND Name = " + Util.Quote(aa.Name) );
        break;

      case Action.Modify:
        if ( DTI.Base( aa.Type ) != DTI.Base( types[ ix ] ) ) e.Error( "Modify cannot change base type" );
        if ( DTI.Scale( aa.Type ) != DTI.Scale( types[ ix ] ) ) e.Error( "Modify cannot change scale" );
        types.RemoveAt( ix );
        types.Insert( ix, aa.Type );
        Sql( "UPDATE sys.Column SET Type = " + (int)aa.Type + " WHERE Table=" + t.TableId + " AND Name = " + Util.Quote(aa.Name) );
        Sql( "EXEC sys.ModifiedColumn(" + t.TableId + "," + ix + ")" );
        break;
    }
  }

  // Apply the new layout to the table data and rebuild anything that depends on it.
  var newcols = ColInfo.New( names, types );
  t.AlterData( newcols, map.ToArray() );
  Sql( "EXEC sys.RecreateModifiedIndexes()" );
  t.OpenIndexes();
  ResetCache();
}
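// AlterTable is driven entirely by AlterAction records, whose declarations are not in this extract.
// Judging from the fields and enum members referenced above, they presumably look roughly like the
// hypothetical sketch below ( DataType is the codebase's existing type enum ):
enum Action { Add, Drop, ColumnRename, Modify }

class AlterAction
{
  public Action Operation; // Which ALTER TABLE clause this record represents.
  public string Name;      // Column the action applies to.
  public string NewName;   // Replacement name, used by ColumnRename.
  public DataType Type;    // Column type, used by Add and Modify.
}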
public IndexInfo [] ReadIndexes( long tableId )
{
  var ix = new G.List<IndexInfo>();
  var r = new RowCursor( SysIndexCol );
  long CurIx = 0;
  int IxNum = 0;

  // Use SysIndexColIndex to avoid scanning the entire SysIndexCol table.
  var start = new LongStart( tableId );
  foreach ( IndexFileRecord ixr in SysIndexColIndex.From( start.Compare, false ) )
  {
    if ( ixr.Col[0].L == tableId )
    {
      r.Get( ixr.Col[1].L );
      var ii = new IndexInfo();
      ii.IndexId = r.V[2].L;
      if ( ii.IndexId != CurIx ) { IxNum = 0; CurIx = ii.IndexId; }
      ii.IxNum = IxNum++;
      ii.ColIx = (int)r.V[3].L;
      ix.Add( ii );
    }
    else break;
  }
  return ix.ToArray();
}
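// ReadIndexes builds the IndexInfo array that OpenIndexes ( above ) later groups by IndexId.
// The IndexInfo declaration is not shown in this extract; based on the fields the two methods use,
// a plausible ( hypothetical ) shape is:
class IndexInfo
{
  public long IndexId; // Index identifier; consecutive entries sharing an id belong to one index.
  public int IxNum;    // Position of the column within that index.
  public int ColIx;    // Column number within the table.
}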
public static ColInfo New(G.List<string> names, G.List<DataType> types)
{
  return new ColInfo(names.ToArray(), types.ToArray());
}
public ExpList( G.List<Exp> list ) { List = list.ToArray(); Type = DataType.None; }
public ExpFuncCall( string schema, string fname, G.List<Exp> plist ) { Schema = schema; FuncName = fname; Plist = plist.ToArray(); }
public Select( G.List<Exp> exps, TableExpression te, Exp where, Exp[] group, OrderByExp[] order, bool [] used, SqlExec x )
{
  /* There is more work to be done here, for example 2 * SUM(Total) is currently not allowed.
     Also if there is a GROUP BY, SELECT expressions should not be allowed to access fields not in
     the group list, unless there is an enclosing aggregate function.
     Also maybe common sub-expression analysis, and perhaps constant folding, could be done?
  */

  Exps = exps; TE = te; Where = where; Order = order;

  ColumnCount = exps.Count;
  var names = new string[ ColumnCount ];
  var types = new DataType[ ColumnCount ];
  for ( int i = 0; i < ColumnCount; i += 1 )
  {
    names[ i ] = exps[ i ].Name;
    types[ i ] = exps[ i ].Type;
  }
  CI = new ColInfo( names, types );

  if ( x.ParseOnly ) return;

  Used = Util.ToList( used );

  if ( group != null )
  {
    // Compute AggSpec and GroupSpec
    var alist = new G.List<AggSpec>();
    for ( int i = 0; i < exps.Count; i += 1 )
    {
      Exp e = exps[ i ];
      AggOp op = e.GetAggOp();
      if ( op != AggOp.None )
      {
        AggSpec a = new AggSpec();
        a.ColIx = i;
        a.Type = e.Type;
        a.Op = op;
        alist.Add( a );
      }
    }
    AggSpec = alist.ToArray();

    var glist = new G.List<GroupSpec>();
    for ( int i = 0; i < group.Length; i += 1 )
    {
      GroupSpec g = new GroupSpec();
      g.ColIx = Exps.Count;
      g.Type = group[ i ].Type;
      Exps.Add( group[ i ] );
      glist.Add( g );
    }
    GroupSpec = glist.ToArray();
  }

  if ( Order != null )
  {
    var sortSpec = new SortSpec[ Order.Length ];
    for ( int i = 0; i < Order.Length; i += 1 )
    {
      // Quite complicated as ORDER BY can use aliases or expressions.
      Exp e = Order[ i ].E;
      sortSpec[ i ].Desc = Order[ i ].Desc;
      int cix = -1;
      if ( e is ExpName )
      {
        string alias = ((ExpName)e).ColName;
        for ( int j = 0; j < CI.Count; j += 1 )
        {
          if ( CI.Name[j] == alias ) { e = Exps[ j ]; cix = j; break; }
        }
      }
      if ( cix < 0 ) { cix = Exps.Count; Exps.Add( e ); e.Bind( x ); }
      sortSpec[ i ].Type = e.Type;
      sortSpec[ i ].ColIx = cix;
    }
    SortSpec = sortSpec;
  }

  Dvs = Util.GetDVList( Exps.ToArray() );

  if ( Where != null ) WhereD = Where.GetDB();

  Ids = Where == null ? null : Where.GetIdSet( TE );
  if ( Ids != null ) Ids = new IdCopy( Ids ); // Need to take a copy of the id values if an index is used.
}
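// The Select constructor fills three small descriptor types whose declarations are not included here.
// From the assignments above, they presumably look something like the hypothetical sketch below.
// SortSpec at least must be a value type, since fields of freshly allocated array elements are assigned
// directly; AggSpec and GroupSpec are shown the same way only for symmetry.
struct AggSpec
{
  public int ColIx;     // Which SELECT expression is aggregated.
  public DataType Type; // Result type of the aggregate.
  public AggOp Op;      // Aggregate operation ( the code above checks against AggOp.None ).
}

struct GroupSpec
{
  public int ColIx;     // Index of the grouping expression within Exps.
  public DataType Type; // Type of the grouping expression.
}

struct SortSpec
{
  public int ColIx;     // Index of the sort expression within Exps.
  public DataType Type; // Type of the sort expression.
  public bool Desc;     // True for a descending sort.
}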
public ExpList( G.List<Exp> list ) { List = list.ToArray(); }
public Select( G.List<Exp> exps, TableExpression te, Exp where, Exp[] group, OrderByExp[] order, bool [] used, SqlExec x )
{
  Exps = exps; TE = te; Where = where; Order = order; Used = used;

  ColumnCount = exps.Count;
  var names = new string[ ColumnCount ];
  var types = new DataType[ ColumnCount ];
  for ( int i = 0; i < ColumnCount; i += 1 )
  {
    names[i] = exps[i].Name;
    types[i] = exps[i].Type;
  }
  Cols = new ColInfo( names, types );

  if ( x.ParseOnly ) return;

  if ( group != null )
  {
    // Compute AggSpec and GroupSpec
    var alist = new G.List<AggSpec>();
    for ( int i = 0; i < exps.Count; i += 1 )
    {
      Exp e = exps[i];
      AggOp op = e.GetAggOp();
      if ( op != AggOp.None )
      {
        AggSpec a = new AggSpec();
        a.ColIx = i;
        a.Type = e.Type;
        a.Op = op;
        alist.Add( a );
      }
    }
    AggSpec = alist.ToArray();

    var glist = new G.List<GroupSpec>();
    for ( int i = 0; i < group.Length; i += 1 )
    {
      GroupSpec g = new GroupSpec();
      g.ColIx = Exps.Count; // Note: we could look in Exps to see if it is already there rather than adding an extra Exp.
      g.Type = group[ i ].Type;
      Exps.Add( group[ i ] );
      glist.Add( g );
    }
    GroupSpec = glist.ToArray();
  }

  if ( Order != null )
  {
    var sortSpec = new SortSpec[ Order.Length ];
    for ( int i = 0; i < Order.Length; i += 1 )
    {
      // Quite complicated as ORDER BY can use aliases or expressions.
      Exp e = Order[i].E;
      sortSpec[ i ].Desc = Order[i].Desc;
      bool found = false;
      if ( e is ExpName )
      {
        string alias = ((ExpName)e).ColName;
        for ( int j = 0; j < Cols.Count; j += 1 )
        {
          if ( Cols.Names[j] == alias ) { e = Exps[ j ]; found = true; break; }
        }
      }
      int cix = Exps.Count;
      Exps.Add( e );
      if ( !found ) e.Bind( x );
      sortSpec[ i ].Type = e.Type;
      sortSpec[ i ].ColIx = cix;
    }
    SortSpec = sortSpec;
  }
}