// Delete removes the entry stored at the given logical path.
// Mirrors the Go original: deleting a path that does not exist is not an error.
public void Delete(string path)
{
    // Go: if path == "" { return nil }
    if (string.IsNullOrEmpty(path))
    {
        return;
    }

    using (var defer = new Util.Defer())
    {
        // Serialize all backend mutations (Go: b.l.Lock() / defer b.l.Unlock()).
        _l.WaitOne();
        defer.Add(() => _l.ReleaseMutex());

        // Split the logical path into its on-disk directory and file name.
        var (basePath, key) = GetPath(path);
        var fullPath = System.IO.Path.Combine(basePath, key);

        // BUG FIX: the Go original explicitly ignores os.IsNotExist errors.
        // File.Delete is already a no-op for a missing file, but it throws
        // DirectoryNotFoundException when the parent directory is missing,
        // so guard with File.Exists to preserve the original's tolerance.
        if (File.Exists(fullPath))
        {
            File.Delete(fullPath);
        }

        // Prune any now-empty parent directories of the logical path.
        CleanupLogicalPath(path);
    }
}
// List is used to list all the keys under a given
// prefix, up to the next prefix.
public async Task<IEnumerable<string>> ListAsync(string prefix)
{
    using (var defer = new Util.Defer())
    {
        await _permitPool.Acquire();
        // BUG FIX: the permit was acquired but never released (the Go original
        // has `defer i.permitPool.Release()`, and the sibling Get/Put/Delete
        // methods all register the release); without this, every List call
        // leaks a permit and the pool eventually starves.
        defer.Add(async () => await _permitPool.Release());

        // Readers share the lock (Go: i.l.RLock() / defer i.l.RUnlock()).
        _l.EnterReadLock();
        defer.Add(() => _l.ExitReadLock());

        var @out = new List<string>();
        // Tracks folder entries already emitted so each appears only once.
        var seen = new Dictionary<string, bool>();

        // For each key under the prefix: emit the remainder as-is when it is a
        // leaf, or truncate at the first '/' (inclusive) to emit a folder entry.
        Walker<Entry> walkFn = (s, v) =>
        {
            var trimmed = s.StartsWith(prefix) ? s.Substring(prefix.Length) : s;
            var sep = trimmed.IndexOf('/');
            if (sep == -1)
            {
                @out.Add(trimmed);
            }
            else
            {
                trimmed = trimmed.Substring(0, sep + 1);
                if (!seen.ContainsKey(trimmed))
                {
                    @out.Add(trimmed);
                    seen[trimmed] = true;
                }
            }
            return (false); // false => keep walking
        };

        _root.WalkPrefix(prefix, walkFn);
        return (@out);
    }
}
// Exercises the file backend against the shared physical-backend test suite,
// using a throwaway temp directory as the storage root.
public void TestFileBackend()
{
    using (var defer = new Util.Defer())
    using (var loggerFactory = VaultLogger.CreateLoggerFactory(LogLevel.Trace))
    {
        // Storage root for this run; cleanup is deferred (delete is currently
        // disabled to allow post-mortem inspection of the directory).
        var tempDir = Util.IO.Directory.CreateTempDir(prefix: "vault");
        defer.Add(() =>
        {
            Debug.WriteLine($"WOULD DELETE ALL AT: [{tempDir}]");
            //Directory.Delete(tempDir, true);
        });

        loggerFactory.AddConsole(LogLevel.Trace);
        var log = loggerFactory.CreateLogger<FileBackend>();

        var backend = Physical.Global.NewBackend("file", log, new Util.ConfigMap<string>
        {
            ["path"] = tempDir,
        });

        // Run the generic backend conformance tests.
        PhysicalTests.TestBackend(backend);
        PhysicalTests.TestBackend_ListPrefix(backend);
    }
}
// Delete is used to permanently delete an entry
public async Task DeleteAsync(string key)
{
    using (var cleanup = new Util.Defer())
    {
        // Borrow a permit for the duration of the call.
        // NOTE(review): Defer.Add with an async lambda is fire-and-forget if
        // Add takes a plain Action — assumed intentional, matches siblings.
        await _permitPool.Acquire();
        cleanup.Add(async () => await _permitPool.Release());

        // Deletion mutates the tree, so take the write lock exclusively.
        _l.EnterWriteLock();
        cleanup.Add(() => _l.ExitWriteLock());

        _root.GoDelete(key);
    }
}
// Put is used to insert or update an entry
public async Task PutAsync(Entry entry)
{
    using (var cleanup = new Util.Defer())
    {
        // Borrow a permit for the duration of the call.
        await _permitPool.Acquire();
        cleanup.Add(async () => await _permitPool.Release());

        // Insertion mutates the tree, so take the write lock exclusively.
        _l.EnterWriteLock();
        cleanup.Add(() => _l.ExitWriteLock());

        // Upsert keyed by the entry's own key.
        _root.GoInsert(entry.Key, entry);
    }
}
// Get is used to fetch an entry
public async Task<Entry> GetAsync(string key)
{
    using (var cleanup = new Util.Defer())
    {
        // Borrow a permit for the duration of the call.
        await _permitPool.Acquire();
        cleanup.Add(async () => await _permitPool.Release());

        // Reads share the lock.
        _l.EnterReadLock();
        cleanup.Add(() => _l.ExitReadLock());

        // GoGet mirrors Go's (value, ok) lookup; the value member is the
        // stored entry, or the type default when the key is absent.
        var lookup = _root.GoGet(key);
        return (lookup.value);
    }
}
// This is not a real test. This is just a helper process kicked off by tests.
// When re-invoked as a child process (GO_WANT_HELPER_PROCESS=1) it acts as a
// token helper: the command line after "--" selects the operation
// (erase/get/store) against the file named by GO_HELPER_PATH.
public void TestExternalTokenHelperProcess()
{
    // Only run when explicitly re-invoked as the helper child process.
    if (Environment.GetEnvironmentVariable("GO_WANT_HELPER_PROCESS") != "1")
    {
        return;
    }

    using (var defer = new Util.Defer())
    {
        // Terminate the whole process once the deferred actions run, so the
        // helper never falls through into the normal test runner teardown.
        defer.Add(() => Environment.Exit(0));

        // Drop everything up to and including the "--" separator; what
        // remains is the helper's own command line.
        var args = Environment.GetCommandLineArgs().ToList();
        while (args.Count > 0)
        {
            if (args[0] == "--")
            {
                args.RemoveAt(0);
                break;
            }
            args.RemoveAt(0);
        }

        if (args.Count == 0)
        {
            Assert.Fail("No command");
        }

        // Pop the command word; the rest are its arguments.
        var cmd = args[0];
        args.RemoveAt(0);

        switch (cmd)
        {
            case "helper":
                // Token storage location supplied by the parent test.
                var path = Environment.GetEnvironmentVariable("GO_HELPER_PATH");
                switch (args[0])
                {
                    case "erase":
                        File.Delete(path);
                        break;
                    case "get":
                        // Missing token file is not an error — just return.
                        if (!File.Exists(path))
                        {
                            return;
                        }
                        // Stream the stored token to stdout; dispose deferred.
                        var f = File.Open(path, FileMode.Open);
                        defer.Add(() => f.Dispose());
                        f.CopyTo(Console.OpenStandardOutput());
                        break;
                    case "store":
                        // NOTE: C# switch cases share one scope, so `f` declared
                        // in the "get" case is intentionally reassigned here.
                        f = File.Open(path, FileMode.Create);
                        defer.Add(() => f.Dispose());
                        // Persist stdin as the new token contents.
                        Console.OpenStandardInput().CopyTo(f);
                        break;
                }
                break;
            default:
                throw new Exception("Unknown command: " + cmd);
        }
    }
}
// FormatRequest builds an audit log entry for an incoming request and writes
// it to the given stream. Unless config.Raw is set, the request and auth are
// deep-copied and all sensitive fields are HMAC-hashed before serialization,
// so the caller's objects are never mutated (the one transient mutation —
// clearing the connection state — is restored via defer).
public void FormatRequest(Stream w, FormatterConfig config, Logical.Auth auth, Logical.Request req, Exception inErr)
{
    if (req == null)
    {
        throw new ArgumentNullException(nameof(req));
    }
    if (w == null)
    {
        throw new ArgumentNullException(nameof(w));
    }
    //if (this.AuditFormatWriter == null)
    //    throw new InvalidOperationException("no format writer present");

    using (var defer = new Util.Defer())
    {
        if (!config.Raw)
        {
            // Before we copy the structure we must nil out some data
            // otherwise we will cause reflection to panic and die
            // (per the Go original; the state is restored on the ORIGINAL
            // request object after the copies below are taken).
            if (req?.Connection?.ConnectionState != null)
            {
                var origReq = req;
                var origState = req.Connection.ConnectionState;
                req.Connection.ConnectionState = null;
                defer.Add(() => origReq.Connection.ConnectionState = origState);
            }

            // Copy the auth structure so hashing does not leak back to the caller.
            if (auth != null)
            {
                auth = auth.DeepCopy();
            }

            // Copy the request for the same reason.
            req = req.DeepCopy();

            // Hash any sensitive information in the auth copy.
            if (auth != null)
            {
                HashStructure.Hash(config.Salt, auth);
            }

            // Cache and restore accessor in the request: when HMACAccessor is
            // disabled the accessor must survive the hashing pass verbatim.
            var clientTokenAccessor = config.HMACAccessor ? null : req?.ClientTokenAccessor;
            HashStructure.Hash(config.Salt, req);
            if (!string.IsNullOrEmpty(clientTokenAccessor))
            {
                req.ClientTokenAccessor = clientTokenAccessor;
            }
        }

        // If auth is nil, make an empty one so the entry builder below
        // can dereference it unconditionally.
        if (auth == null)
        {
            auth = new Logical.Auth();
        }

        string errString = null;
        if (inErr != null)
        {
            errString = inErr.Message;
        }

        // Assemble the flattened audit entry.
        var reqEntry = new AuditRequestEntry
        {
            Type = "request",
            Error = errString,
            Auth = new AuditAuth
            {
                DisplayName = auth.DisplayName,
                Policies = auth.Policies,
                Metadata = auth.Metadata,
            },
            Request = new AuditRequest
            {
                ID = req.ID,
                ClientToken = req.ClientToken,
                ClientTokenAccessor = req.ClientTokenAccessor,
                Operation = req.Operation,
                Path = req.Path,
                Data = req.Data,
                RemoteAddr = GetRemoteAddr(req),
                ReplicationCluster = req.ReplicationCluster,
                Headers = req.Headers,
            },
        };

        // Wrap TTL is recorded in whole seconds, matching Go's TTL / time.Second.
        if (req.WrapInfo != null)
        {
            reqEntry.Request.WrapTTL = (int)req.WrapInfo.TTL.TotalSeconds;
        }

        if (!config.OmitTime)
        {
            reqEntry.Time = DateTime.UtcNow.FormatUtcAsRFC3339();
        }

        this.WriteRequest(w, reqEntry);
    }
}
// FormatResponse builds an audit log entry for a completed request/response
// pair and writes it to the given stream. Unless config.Raw is set, the auth,
// request, and response are deep-copied and sensitive fields HMAC-hashed, with
// accessors cached and restored when HMACAccessor is disabled.
public void FormatResponse(Stream w, FormatterConfig config, Logical.Auth auth, Logical.Request req, Logical.Response resp, Exception inErr)
{
    if (req == null)
    {
        throw new ArgumentNullException(nameof(req));
    }
    if (w == null)
    {
        throw new ArgumentNullException(nameof(w));
    }

    using (var defer = new Util.Defer())
    {
        if (!config.Raw)
        {
            // Before we copy the structure we must nil out some data
            // otherwise we will cause reflection to panic and die
            // (restored on the ORIGINAL request object after copying).
            if (req?.Connection?.ConnectionState != null)
            {
                var origReq = req;
                var origState = req.Connection.ConnectionState;
                req.Connection.ConnectionState = null;
                defer.Add(() => origReq.Connection.ConnectionState = origState);
            }

            // Copy the auth structure so hashing never mutates the caller's objects.
            if (auth != null)
            {
                auth = auth.DeepCopy();
            }

            req = req.DeepCopy();

            if (resp != null)
            {
                resp = resp.DeepCopy();
            }

            // Hash any sensitive information.
            // Cache and restore accessor in the auth.
            if (auth != null)
            {
                var accessor = config.HMACAccessor ? null : auth.Accessor;
                HashStructure.Hash(config.Salt, auth);
                if (!string.IsNullOrEmpty(accessor))
                {
                    auth.Accessor = accessor;
                }
            }

            // Cache and restore accessor in the request.
            var clientTokenAccessor = config.HMACAccessor ? null : req?.ClientTokenAccessor;
            HashStructure.Hash(config.Salt, req);
            if (!string.IsNullOrEmpty(clientTokenAccessor))
            {
                req.ClientTokenAccessor = clientTokenAccessor;
            }

            // Cache and restore accessors in the response.
            if (resp != null)
            {
                var accessor = config.HMACAccessor ? null : resp?.Auth?.Accessor;
                var wrappedAccessor = config.HMACAccessor ? null : resp?.WrapInfo?.WrappedAccessor;
                HashStructure.Hash(config.Salt, resp);
                if (!string.IsNullOrEmpty(accessor))
                {
                    resp.Auth.Accessor = accessor;
                }
                if (!string.IsNullOrEmpty(wrappedAccessor))
                {
                    resp.WrapInfo.WrappedAccessor = wrappedAccessor;
                }
            }
        }

        // If things are nil, make empty to avoid panics.
        if (auth == null)
        {
            auth = new Logical.Auth();
        }
        if (resp == null)
        {
            resp = new Logical.Response();
        }

        string errString = null;
        if (inErr != null)
        {
            errString = inErr.Message;
        }

        AuditAuth respAuth = null;
        if (resp.Auth != null)
        {
            respAuth = new AuditAuth
            {
                ClientToken = resp.Auth.ClientToken,
                Accessor = resp.Auth.Accessor,
                DisplayName = resp.Auth.DisplayName,
                Policies = resp.Auth.Policies,
                Metadata = resp.Auth.Metadata,
            };
        }

        AuditSecret respSecret = null;
        if (resp.Secret != null)
        {
            // BUG FIX: the statement-terminating semicolon was outside the `if`
            // block (`... } } ;`), which does not compile in C#.
            respSecret = new AuditSecret
            {
                LeaseID = resp.Secret.LeaseID,
            };
        }

        AuditResponseWrapInfo respWrapInfo = null;
        if (resp.WrapInfo != null)
        {
            // If the wrap token is a JWT, audit the embedded vault token instead.
            var token = resp.WrapInfo.Token;
            var jwtToken = ParseVaultTokenFromJWT(token);
            if (!string.IsNullOrEmpty(jwtToken))
            {
                token = jwtToken;
            }
            respWrapInfo = new AuditResponseWrapInfo
            {
                // Whole seconds, matching Go's TTL / time.Second.
                TTL = (int)resp.WrapInfo.TTL.TotalSeconds,
                Token = token,
                CreationTime = resp.WrapInfo.CreationTime.FormatUtcAsRFC3339Nano(),
                WrappedAccessor = resp.WrapInfo.WrappedAccessor,
            };
        }

        // Assemble the flattened audit entry.
        var respEntry = new AuditResponseEntry
        {
            Type = "response",
            Error = errString,
            Auth = new AuditAuth
            {
                DisplayName = auth.DisplayName,
                Policies = auth.Policies,
                Metadata = auth.Metadata,
            },
            Request = new AuditRequest
            {
                ID = req.ID,
                ClientToken = req.ClientToken,
                ClientTokenAccessor = req.ClientTokenAccessor,
                Operation = req.Operation,
                Path = req.Path,
                Data = req.Data,
                RemoteAddr = GetRemoteAddr(req),
                ReplicationCluster = req.ReplicationCluster,
                Headers = req.Headers,
            },
            Response = new AuditResponse
            {
                Auth = respAuth,
                Secret = respSecret,
                Data = resp.Data,
                Redirect = resp.Redirect,
                WrapInfo = respWrapInfo,
            },
        };

        if (req.WrapInfo != null)
        {
            respEntry.Request.WrapTTL = (int)req.WrapInfo.TTL.TotalSeconds;
        }

        if (!config.OmitTime)
        {
            respEntry.Time = DateTime.UtcNow.FormatUtcAsRFC3339();
        }

        WriteResponse(w, respEntry);
    }
}
// Mount is used to mount a new backend to the mount table.
// Validates the path, persists the updated mount table, installs it, and
// routes the new backend. Throws Logical.CodedError on policy violations
// (403), conflicts (409), or persistence failure (500).
static void mount(this Core c, MountEntry entry)
{
    using (var defer = new Util.Defer())
    {
        // Ensure we end the path in a slash.
        if (!entry.Path.EndsWith("/"))
        {
            entry.Path += "/";
        }

        // Prevent protected paths from being mounted.
        // BUG FIX: the Go original uses strings.HasPrefix; EndsWith checked the
        // wrong end of the path and let protected prefixes through.
        foreach (var p in Globals.protectedMounts)
        {
            if (entry.Path.StartsWith(p))
            {
                throw new Logical.CodedError(403, $"cannot mount [{entry.Path}]");
            }
        }

        // Do not allow more than one instance of a singleton mount.
        foreach (var p in Globals.singletonMounts)
        {
            if (entry.Type == p)
            {
                throw new Logical.CodedError(403, $"Cannot mount more than one instance of [{entry.Type}]");
            }
        }

        // Hold the mounts write lock for the rest of the operation.
        c.mountsLock.EnterWriteLock();
        defer.Add(() => c.mountsLock.ExitWriteLock());

        // Verify there is no conflicting mount.
        var match = c.router.MatchingMount(entry.Path);
        if (!string.IsNullOrEmpty(match))
        {
            throw new Logical.CodedError(409, $"existing mount at [{match}]");
        }

        // Generate a new UUID and view.
        if (string.IsNullOrEmpty(entry.UUID))
        {
            entry.UUID = Guid.NewGuid().ToString();
        }
        var viewPath = $"{Constants.backendBarrierPrefix}{entry.UUID}/";
        var view = BarrierView.NewBarrierView(c.barrier, viewPath);
        var sysView = c.mountEntrySysView(entry);

        var backend = c.newLogicalBackend(entry.Type, sysView, view, null);

        // Call initialize; this takes care of init tasks that must be run after
        // the ignore paths are collected.
        backend.Initialize();

        var newTable = c.mounts.shallowClone();
        newTable.Entries.Add(entry);
        try
        {
            c.persistMounts(newTable);
        }
        catch (Exception ex)
        {
            c.logger.LogError("core: failed to update mount table", ex);
            throw new Logical.CodedError(500, "failed to update mount table");
        }
        // BUG FIX: the Go original's `c.mounts = newTable` was never translated,
        // so the persisted table was never installed in memory.
        c.mounts = newTable;

        // BUG FIX: the trailing Go source was left untranslated verbatim
        // (`if err := c.router.Mount(...)`), which is not valid C#. Route the
        // new backend and log the successful mount.
        c.router.Mount(backend, entry.Path, entry, view);
        c.logger.LogInformation("core: successful mount, path={Path}, type={Type}", entry.Path, entry.Type);
    }
}
// Verifies List() prefix semantics against any backend: a listing returns the
// immediate leaf keys plus one "folder/" entry per deeper level, never the
// full recursive key set.
public static void TestBackend_ListPrefix(IBackend b)
{
    var leaf = new Entry { Key = "foo", Value = "test".ToUtf8Bytes() };
    var nested = new Entry { Key = "foo/bar", Value = "test".ToUtf8Bytes() };
    var deepNested = new Entry { Key = "foo/bar/baz", Value = "test".ToUtf8Bytes() };

    using (var defer = new Util.Defer())
    {
        // Remove every fixture key regardless of how the assertions go.
        defer.Add(() =>
        {
            b.Delete("foo");
            b.Delete("foo/bar");
            b.Delete("foo/bar/baz");
        });

        b.Put(leaf);
        b.Put(nested);
        b.Put(deepNested);

        // Root scan: the leaf "foo" plus the folder marker "foo/".
        var listing = b.List("");
        Assert.AreEqual(2, listing.Count());
        listing = listing.OrderBy(x => x);
        Assert.AreEqual("foo", listing.ElementAt(0));
        Assert.AreEqual("foo/", listing.ElementAt(1));

        // One level down: the leaf "bar" plus the folder marker "bar/".
        listing = b.List("foo/");
        Assert.AreEqual(2, listing.Count());
        listing = listing.OrderBy(x => x);
        Assert.AreEqual("bar", listing.ElementAt(0));
        Assert.AreEqual("bar/", listing.ElementAt(1));

        // Deepest level: just the single leaf "baz".
        listing = b.List("foo/bar/");
        listing = listing.OrderBy(x => x);
        Assert.AreEqual(1, listing.Count());
        Assert.AreEqual("baz", listing.ElementAt(0));
    }
}
// Verifies that the file backend can read entries whose on-disk file name is
// NOT base64url-encoded (legacy "_foo" naming), and that List/Get/Put/Delete
// all behave consistently when such a raw file is planted directly in the
// backend directory.
public void TestFileBackend_Base64UrlEncoding()
{
    using (var defer = new Util.Defer())
    using (var lf = VaultLogger.CreateLoggerFactory(LogLevel.Trace))
    {
        // Storage root for this run; actual deletion is disabled to allow
        // post-mortem inspection.
        var backendPath = Util.IO.Directory.CreateTempDir(prefix: "vault");
        defer.Add(() =>
        {
            Debug.WriteLine($"WOULD DELETE ALL AT: [{backendPath}]");
            //Directory.Delete(backendPath, true);
        });

        var logger = lf.CreateLogger<FileBackend>();

        var b = Global.NewBackend("file", logger, new ConfigMap<string>
        {
            ["path"] = backendPath,
        });

        // List the entries. Length should be zero.
        var keys = b.List("");
        Assert.AreEqual(0, keys.Count());

        // Create a storage entry WITHOUT base64 encoding the file name:
        // write the JSON-serialized entry straight to "_foo" on disk.
        var rawFullPath = Path.Combine(backendPath, "_foo");
        var e = new Entry { Key = "foo", Value = "test".ToUtf8Bytes() };
        using (var fs = new FileStream(rawFullPath, FileMode.Create, FileAccess.Write))
        {
            var bytes = JsonConvert.SerializeObject(e).ToUtf8Bytes();
            fs.Write(bytes, 0, bytes.Length);
        }

        // Get should find the raw-named file and round-trip the entry.
        var @out = b.Get("foo");
        var compare = new CompareLogic();
        Assert.IsTrue(compare.Compare(e, @out).AreEqual);

        // List the entries. There should be one entry.
        keys = b.List("");
        Assert.AreEqual(1, keys.Count());

        // Put through the backend (which encodes the name) must not create
        // a duplicate listing for the same logical key.
        b.Put(e);

        // List the entries again. There should still be one entry.
        keys = b.List("");
        Assert.AreEqual(1, keys.Count());

        // Get should still work after the Put.
        @out = b.Get("foo");
        Assert.IsTrue(compare.Compare(e, @out).AreEqual);

        // Delete must remove the key entirely...
        b.Delete("foo");

        // ...so Get returns null...
        @out = b.Get("foo");
        Assert.IsNull(@out);

        // ...and the listing is empty again.
        keys = b.List("");
        Assert.AreEqual(0, keys.Count());

        // Re-plant the raw (non-encoded) file and confirm List still sees it.
        using (var fs = new FileStream(rawFullPath, FileMode.Create, FileAccess.Write))
        {
            var bytes = JsonConvert.SerializeObject(e).ToUtf8Bytes();
            fs.Write(bytes, 0, bytes.Length);
        }

        keys = b.List("");
        Assert.AreEqual(1, keys.Count());
    }
}