/// <summary>Serializes and sends an OP_WRITE_BLOCK request for the given block.</summary>
/// <exception cref="System.IO.IOException"/>
public override void WriteBlock(ExtendedBlock blk, StorageType storageType, Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> blockToken, string clientName, DatanodeInfo[] targets, StorageType[] targetStorageTypes, DatanodeInfo source, BlockConstructionStage stage, int pipelineSize, long minBytesRcvd, long maxBytesRcvd, long latestGenerationStamp, DataChecksum requestedChecksum, CachingStrategy cachingStrategy, bool allowLazyPersist, bool pinning, bool[] targetPinnings)
{
    // Client header carries the block, client name, and access token.
    DataTransferProtos.ClientOperationHeaderProto clientHeader =
        DataTransferProtoUtil.BuildClientHeader(blk, clientName, blockToken);
    DataTransferProtos.ChecksumProto checksum = DataTransferProtoUtil.ToProto(requestedChecksum);
    // NOTE(review): targets/storage types/pinnings are serialized with start
    // index 1 — presumably the first entry describes this node; confirm
    // against PBHelper.Convert.
    DataTransferProtos.OpWriteBlockProto.Builder opBuilder =
        DataTransferProtos.OpWriteBlockProto.NewBuilder()
            .SetHeader(clientHeader)
            .SetStorageType(PBHelper.ConvertStorageType(storageType))
            .AddAllTargets(PBHelper.Convert(targets, 1))
            .AddAllTargetStorageTypes(PBHelper.ConvertStorageTypes(targetStorageTypes, 1))
            .SetStage(DataTransferProtoUtil.ToProto(stage))
            .SetPipelineSize(pipelineSize)
            .SetMinBytesRcvd(minBytesRcvd)
            .SetMaxBytesRcvd(maxBytesRcvd)
            .SetLatestGenerationStamp(latestGenerationStamp)
            .SetRequestedChecksum(checksum)
            .SetCachingStrategy(GetCachingStrategy(cachingStrategy))
            .SetAllowLazyPersist(allowLazyPersist)
            .SetPinning(pinning)
            .AddAllTargetPinnings(PBHelper.Convert(targetPinnings, 1));
    // Source is an optional field: only set when the caller supplied one.
    if (source != null)
    {
        opBuilder.SetSource(PBHelper.ConvertDatanodeInfo(source));
    }
    Send(@out, OP.WriteBlock, ((DataTransferProtos.OpWriteBlockProto)opBuilder.Build()));
}
/// <summary>RPC client stub: sends a cache report for one block pool and returns the
/// NameNode's command, or null when the response carries none.</summary>
/// <exception cref="System.IO.IOException"/>
public virtual DatanodeCommand CacheReport(DatanodeRegistration registration, string poolId, IList<long> blockIds)
{
    DatanodeProtocolProtos.CacheReportRequestProto.Builder request =
        DatanodeProtocolProtos.CacheReportRequestProto.NewBuilder()
            .SetRegistration(PBHelper.Convert(registration))
            .SetBlockPoolId(poolId);
    foreach (long id in blockIds)
    {
        request.AddBlocks(id);
    }
    DatanodeProtocolProtos.CacheReportResponseProto response;
    try
    {
        response = rpcProxy.CacheReport(NullController,
            ((DatanodeProtocolProtos.CacheReportRequestProto)request.Build()));
    }
    catch (ServiceException se)
    {
        // Unwrap the remote IOException tunneled inside the ServiceException.
        throw ProtobufHelper.GetRemoteException(se);
    }
    // The command field is optional on the wire.
    return response.HasCmd() ? PBHelper.Convert(response.GetCmd()) : null;
}
/// <summary>RPC client stub: reports received and deleted blocks, grouped per storage.</summary>
/// <exception cref="System.IO.IOException"/>
public virtual void BlockReceivedAndDeleted(DatanodeRegistration registration, string poolId, StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks)
{
    DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.Builder request =
        DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.NewBuilder()
            .SetRegistration(PBHelper.Convert(registration))
            .SetBlockPoolId(poolId);
    foreach (StorageReceivedDeletedBlocks storageBlocks in receivedAndDeletedBlocks)
    {
        DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder storageBuilder =
            DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.NewBuilder();
        storageBuilder.SetStorageUuid(storageBlocks.GetStorage().GetStorageID());
        // Full storage object is also set for wire compatibility.
        storageBuilder.SetStorage(PBHelper.Convert(storageBlocks.GetStorage()));
        foreach (ReceivedDeletedBlockInfo blockInfo in storageBlocks.GetBlocks())
        {
            storageBuilder.AddBlocks(PBHelper.Convert(blockInfo));
        }
        request.AddBlocks(
            (DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto)storageBuilder.Build());
    }
    try
    {
        rpcProxy.BlockReceivedAndDeleted(NullController,
            ((DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)request.Build()));
    }
    catch (ServiceException se)
    {
        throw ProtobufHelper.GetRemoteException(se);
    }
}
/// <summary>Create a new encryption zone.</summary>
/// <remarks>
/// Create a new encryption zone.
/// <p/>
/// Called while holding the FSDirectory lock.
/// </remarks>
/// <exception cref="System.IO.IOException"/>
internal virtual XAttr CreateEncryptionZone(string src, CipherSuite suite, CryptoProtocolVersion version, string keyName)
{
    System.Diagnostics.Debug.Assert(dir.HasWriteLock());
    INodesInPath srcIIP = dir.GetINodesInPath4Write(src, false);
    // The zone root must be an empty directory...
    if (dir.IsNonEmptyDirectory(srcIIP))
    {
        throw new IOException("Attempt to create an encryption zone for a non-empty directory.");
    }
    // ...and not a regular file...
    if (srcIIP != null && srcIIP.GetLastINode() != null && !srcIIP.GetLastINode().IsDirectory())
    {
        throw new IOException("Attempt to create an encryption zone for a file.");
    }
    // ...and not already inside another encryption zone.
    EncryptionZoneManager.EncryptionZoneInt existingZone = GetEncryptionZoneForPath(srcIIP);
    if (existingZone != null)
    {
        throw new IOException("Directory " + src + " is already in an " + "encryption zone. (" + GetFullPathName(existingZone) + ")");
    }
    HdfsProtos.ZoneEncryptionInfoProto zoneProto = PBHelper.Convert(suite, version, keyName);
    XAttr ezXAttr = XAttrHelper.BuildXAttr(HdfsServerConstants.CryptoXattrEncryptionZone, zoneProto.ToByteArray());
    IList<XAttr> xattrs = Lists.NewArrayListWithCapacity(1);
    xattrs.AddItem(ezXAttr);
    // Updating the xattr will call addEncryptionZone; done this way so edit
    // log loading takes the same path.
    FSDirXAttrOp.UnprotectedSetXAttrs(dir, src, xattrs, EnumSet.Of(XAttrSetFlag.Create));
    return ezXAttr;
}
/// <summary>Server-side translator: unpacks the request protos and delegates to the
/// DatanodeProtocol implementation.</summary>
/// <exception cref="Com.Google.Protobuf.ServiceException"/>
public virtual DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto CommitBlockSynchronization(RpcController controller, DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request)
{
    // NOTE: "Taragets" is the accessor name generated from the proto field;
    // do not "correct" the spelling.
    IList<HdfsProtos.DatanodeIDProto> targetProtos = request.GetNewTaragetsList();
    DatanodeID[] newTargets = new DatanodeID[targetProtos.Count];
    for (int i = 0; i < targetProtos.Count; i++)
    {
        newTargets[i] = PBHelper.Convert(targetProtos[i]);
    }
    IList<string> storageProtos = request.GetNewTargetStoragesList();
    string[] newStorageIDs = Sharpen.Collections.ToArray(storageProtos, new string[storageProtos.Count]);
    try
    {
        impl.CommitBlockSynchronization(PBHelper.Convert(request.GetBlock()),
            request.GetNewGenStamp(), request.GetNewLength(), request.GetCloseFile(),
            request.GetDeleteBlock(), newTargets, newStorageIDs);
    }
    catch (IOException e)
    {
        // IOExceptions are tunneled through protobuf RPC as ServiceExceptions.
        throw new ServiceException(e);
    }
    return VoidCommitBlockSynchronizationResponseProto;
}
/// <summary>Round-trips a BlockRecoveryCommand through its protobuf form and checks
/// that block ids, order, and string renderings survive.</summary>
public virtual void TestConvertBlockRecoveryCommand()
{
    DatanodeInfo firstNode = DFSTestUtil.GetLocalDatanodeInfo();
    DatanodeInfo secondNode = DFSTestUtil.GetLocalDatanodeInfo();
    DatanodeInfo[] dnInfo = new DatanodeInfo[] { firstNode, secondNode };
    IList<BlockRecoveryCommand.RecoveringBlock> blks = ImmutableList.Of(
        new BlockRecoveryCommand.RecoveringBlock(GetExtendedBlock(1), dnInfo, 3),
        new BlockRecoveryCommand.RecoveringBlock(GetExtendedBlock(2), dnInfo, 3));
    BlockRecoveryCommand cmd = new BlockRecoveryCommand(blks);
    DatanodeProtocolProtos.BlockRecoveryCommandProto proto = PBHelper.Convert(cmd);
    // Proto must preserve both block ids in order.
    NUnit.Framework.Assert.AreEqual(1, proto.GetBlocks(0).GetBlock().GetB().GetBlockId());
    NUnit.Framework.Assert.AreEqual(2, proto.GetBlocks(1).GetBlock().GetB().GetBlockId());
    BlockRecoveryCommand cmd2 = PBHelper.Convert(proto);
    IList<BlockRecoveryCommand.RecoveringBlock> cmd2Blks =
        Lists.NewArrayList(cmd2.GetRecoveringBlocks());
    NUnit.Framework.Assert.AreEqual(blks[0].GetBlock(), cmd2Blks[0].GetBlock());
    NUnit.Framework.Assert.AreEqual(blks[1].GetBlock(), cmd2Blks[1].GetBlock());
    NUnit.Framework.Assert.AreEqual(Joiner.On(",").Join(blks), Joiner.On(",").Join(cmd2Blks));
    NUnit.Framework.Assert.AreEqual(cmd.ToString(), cmd2.ToString());
}
/// <summary>Converts a DataChecksum into its protobuf wire representation.</summary>
public static DataTransferProtos.ChecksumProto ToProto(DataChecksum checksum)
{
    // ChecksumType#valueOf never returns null, so the converted type is safe to use.
    HdfsProtos.ChecksumTypeProto typeProto = PBHelper.Convert(checksum.GetChecksumType());
    DataTransferProtos.ChecksumProto.Builder builder = DataTransferProtos.ChecksumProto.NewBuilder();
    builder.SetBytesPerChecksum(checksum.GetBytesPerChecksum());
    builder.SetType(typeProto);
    return (DataTransferProtos.ChecksumProto)builder.Build();
}
// Mock Answer for getReplicaVisibleLength: verifies the current user carries
// exactly one BlockTokenIdentifier equal to 'ident', checks WRITE access with
// the secret manager, and replies with the token's block id as the length.
// NOTE(review): the declaration has no visible modifiers/return type —
// appears to be Sharpen-generated anonymous-class output; left untouched.
Answer(InvocationOnMock invocation)
{
    object[] args = invocation.GetArguments();
    // Expect (controller, request); the request proto is the second argument.
    NUnit.Framework.Assert.AreEqual(2, args.Length);
    ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto req =
        (ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)args[1];
    ICollection<TokenIdentifier> tokenIds =
        UserGroupInformation.GetCurrentUser().GetTokenIdentifiers();
    NUnit.Framework.Assert.AreEqual("Only one BlockTokenIdentifier expected", 1, tokenIds.Count);
    long result = 0;
    foreach (TokenIdentifier tokenId in tokenIds)
    {
        BlockTokenIdentifier id = (BlockTokenIdentifier)tokenId;
        Log.Info("Got: " + id.ToString());
        // The deserialized token must equal the one the test issued.
        NUnit.Framework.Assert.IsTrue("Received BlockTokenIdentifier is wrong", ident.Equals(id));
        // Throws if the token does not grant WRITE access on the block.
        sm.CheckAccess(id, null, PBHelper.Convert(req.GetBlock()),
            BlockTokenSecretManager.AccessMode.Write);
        result = id.GetBlockId();
    }
    return ((ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto)
        ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto
            .NewBuilder().SetLength(result).Build());
}
/// <summary>Server-side translator for initReplicaRecovery: replies with
/// replicaFound=false when no replica exists, otherwise with the replica's
/// block and original state.</summary>
/// <exception cref="Com.Google.Protobuf.ServiceException"/>
public virtual InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto InitReplicaRecovery(RpcController unused, InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request)
{
    BlockRecoveryCommand.RecoveringBlock recoveringBlock = PBHelper.Convert(request.GetBlock());
    ReplicaRecoveryInfo info;
    try
    {
        info = impl.InitReplicaRecovery(recoveringBlock);
    }
    catch (IOException e)
    {
        // IOExceptions are tunneled through protobuf RPC as ServiceExceptions.
        throw new ServiceException(e);
    }
    if (info == null)
    {
        // No replica on this node.
        return (InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto)
            InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.NewBuilder()
                .SetReplicaFound(false)
                .Build();
    }
    return (InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto)
        InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.NewBuilder()
            .SetReplicaFound(true)
            .SetBlock(PBHelper.Convert(info))
            .SetState(PBHelper.Convert(info.GetOriginalReplicaState()))
            .Build();
}
/// <summary>Serializes the FileDiff list of a file with snapshot feature: a DiffEntry
/// header followed by the individual FileDiff records.</summary>
/// <exception cref="System.IO.IOException"/>
private void SerializeFileDiffList(INodeFile file, OutputStream @out)
{
    FileWithSnapshotFeature snapshotFeature = file.GetFileWithSnapshotFeature();
    if (snapshotFeature == null)
    {
        // Not a snapshotted file: nothing to serialize.
        return;
    }
    IList<FileDiff> diffList = snapshotFeature.GetDiffs().AsList();
    FsImageProto.SnapshotDiffSection.DiffEntry entry =
        (FsImageProto.SnapshotDiffSection.DiffEntry)FsImageProto.SnapshotDiffSection.DiffEntry.NewBuilder()
            .SetInodeId(file.GetId())
            .SetType(FsImageProto.SnapshotDiffSection.DiffEntry.Type.Filediff)
            .SetNumOfDiff(diffList.Count)
            .Build();
    entry.WriteDelimitedTo(@out);
    // Write the diffs in reverse list order.
    for (int i = diffList.Count - 1; i >= 0; i--)
    {
        FileDiff diff = diffList[i];
        FsImageProto.SnapshotDiffSection.FileDiff.Builder diffBuilder =
            FsImageProto.SnapshotDiffSection.FileDiff.NewBuilder()
                .SetSnapshotId(diff.GetSnapshotId())
                .SetFileSize(diff.GetFileSize());
        if (diff.GetBlocks() != null)
        {
            foreach (Block block in diff.GetBlocks())
            {
                diffBuilder.AddBlocks(PBHelper.Convert(block));
            }
        }
        // Only set name/snapshotCopy when the diff recorded a snapshot copy of
        // the inode attributes.
        INodeFileAttributes snapshotCopy = diff.snapshotINode;
        if (snapshotCopy != null)
        {
            diffBuilder.SetName(ByteString.CopyFrom(snapshotCopy.GetLocalNameBytes()))
                .SetSnapshotCopy(FSImageFormatPBINode.Saver.BuildINodeFile(snapshotCopy, parent.GetSaverContext()));
        }
        ((FsImageProto.SnapshotDiffSection.FileDiff)diffBuilder.Build()).WriteDelimitedTo(@out);
    }
}
/// <summary>Client-side stub for initReplicaRecovery: returns null when the remote
/// node has no replica, otherwise the recovered replica's info.</summary>
/// <exception cref="System.IO.IOException"/>
public virtual ReplicaRecoveryInfo InitReplicaRecovery(BlockRecoveryCommand.RecoveringBlock rBlock)
{
    InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto req =
        (InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto)
        InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.NewBuilder()
            .SetBlock(PBHelper.Convert(rBlock))
            .Build();
    InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto resp;
    try
    {
        resp = rpcProxy.InitReplicaRecovery(NullController, req);
    }
    catch (ServiceException e)
    {
        throw ProtobufHelper.GetRemoteException(e);
    }
    if (!resp.GetReplicaFound())
    {
        // No replica found on the remote node.
        return null;
    }
    // A found replica must carry both the block and the state fields.
    if (!resp.HasBlock() || !resp.HasState())
    {
        throw new IOException("Replica was found but missing fields. " + "Req: " + req + "\n" + "Resp: " + resp);
    }
    HdfsProtos.BlockProto blockProto = resp.GetBlock();
    return new ReplicaRecoveryInfo(blockProto.GetBlockId(), blockProto.GetNumBytes(),
        blockProto.GetGenStamp(), PBHelper.Convert(resp.GetState()));
}
/// <summary>Receive OP_WRITE_BLOCK</summary>
/// <exception cref="System.IO.IOException"/>
private void OpWriteBlock(DataInputStream @in)
{
    // Parse the varint-length-prefixed OpWriteBlockProto from the stream.
    DataTransferProtos.OpWriteBlockProto proto = DataTransferProtos.OpWriteBlockProto
        .ParseFrom(PBHelper.VintPrefixed(@in));
    DatanodeInfo[] targets = PBHelper.Convert(proto.GetTargetsList());
    // Continue the caller's trace span, if one was propagated in the header.
    TraceScope traceScope = DataTransferProtoUtil.ContinueTraceSpan(proto.GetHeader()
        , proto.GetType().Name);
    try
    {
        // Unpack every proto field and dispatch to the WriteBlock handler.
        // Optional fields fall back: cachingStrategy -> default strategy,
        // allowLazyPersist/pinning -> false.
        WriteBlock(PBHelper.Convert(proto.GetHeader().GetBaseHeader().GetBlock()), PBHelper
            .ConvertStorageType(proto.GetStorageType()), PBHelper.Convert(proto.GetHeader().
            GetBaseHeader().GetToken()), proto.GetHeader().GetClientName(), targets, PBHelper
            .ConvertStorageTypes(proto.GetTargetStorageTypesList(), targets.Length), PBHelper
            .Convert(proto.GetSource()), DataTransferProtoUtil.FromProto(proto.GetStage()),
            proto.GetPipelineSize(), proto.GetMinBytesRcvd(), proto.GetMaxBytesRcvd(), proto
            .GetLatestGenerationStamp(), DataTransferProtoUtil.FromProto(proto.GetRequestedChecksum
            ()), (proto.HasCachingStrategy() ? GetCachingStrategy(proto.GetCachingStrategy()
            ) : CachingStrategy.NewDefaultStrategy()), (proto.HasAllowLazyPersist() ? proto.
            GetAllowLazyPersist() : false), (proto.HasPinning() ? proto.GetPinning() : false
            ), (PBHelper.ConvertBooleanList(proto.GetTargetPinningsList())));
    }
    finally
    {
        // Always close the trace scope, even when WriteBlock throws.
        if (traceScope != null)
        {
            traceScope.Close();
        }
    }
}
/// <summary>Read SASL message and negotiated cipher option from server.</summary>
/// <param name="in">stream to read</param>
/// <returns>
/// SaslResponseWithNegotiatedCipherOption SASL message and
/// negotiated cipher option
/// </returns>
/// <exception cref="System.IO.IOException">for any error</exception>
public static SaslResponseWithNegotiatedCipherOption ReadSaslMessageAndNegotiatedCipherOption(InputStream @in)
{
    DataTransferProtos.DataTransferEncryptorMessageProto proto =
        DataTransferProtos.DataTransferEncryptorMessageProto.ParseFrom(PBHelper.VintPrefixed(@in));
    // Error statuses are translated into exceptions carrying the server's message.
    if (proto.GetStatus() == DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.ErrorUnknownKey)
    {
        throw new InvalidEncryptionKeyException(proto.GetMessage());
    }
    if (proto.GetStatus() == DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.Error)
    {
        throw new IOException(proto.GetMessage());
    }
    byte[] response = proto.GetPayload().ToByteArray();
    IList<CipherOption> options =
        PBHelper.ConvertCipherOptionProtos(proto.GetCipherOptionList());
    // At most one negotiated option is used: the first, if any were sent.
    CipherOption option = (options != null && !options.IsEmpty()) ? options[0] : null;
    return new SaslResponseWithNegotiatedCipherOption(response, option);
}
/// <summary>Reads a SASL negotiation message and negotiation cipher options.</summary>
/// <param name="in">stream to read</param>
/// <param name="cipherOptions">list to store negotiation cipher options</param>
/// <returns>byte[] SASL negotiation message</returns>
/// <exception cref="System.IO.IOException">for any error</exception>
public static byte[] ReadSaslMessageAndNegotiationCipherOptions(InputStream @in, IList<CipherOption> cipherOptions)
{
    DataTransferProtos.DataTransferEncryptorMessageProto proto =
        DataTransferProtos.DataTransferEncryptorMessageProto.ParseFrom(PBHelper.VintPrefixed(@in));
    // Error statuses are translated into exceptions carrying the server's message.
    if (proto.GetStatus() == DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.ErrorUnknownKey)
    {
        throw new InvalidEncryptionKeyException(proto.GetMessage());
    }
    if (proto.GetStatus() == DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.Error)
    {
        throw new IOException(proto.GetMessage());
    }
    // Append every advertised cipher option to the caller-supplied list.
    IList<HdfsProtos.CipherOptionProto> optionProtos = proto.GetCipherOptionList();
    if (optionProtos != null)
    {
        foreach (HdfsProtos.CipherOptionProto optionProto in optionProtos)
        {
            cipherOptions.AddItem(PBHelper.Convert(optionProto));
        }
    }
    return proto.GetPayload().ToByteArray();
}
/// <summary>Round-trips a BlockWithLocations through its protobuf form.</summary>
public virtual void TestConvertBlockWithLocations()
{
    BlocksWithLocations.BlockWithLocations original = GetBlockWithLocations(1);
    HdfsProtos.BlockWithLocationsProto asProto = PBHelper.Convert(original);
    BlocksWithLocations.BlockWithLocations roundTripped = PBHelper.Convert(asProto);
    Compare(original, roundTripped);
}
/// <summary>Fetches the remote edit log manifest from the journal node and converts
/// it from its protobuf form.</summary>
/// <exception cref="System.IO.IOException"/>
public RemoteEditLogManifest Call()
{
    QJournalProtocolProtos.GetEditLogManifestResponseProto response =
        this._enclosing.GetProxy().GetEditLogManifest(this._enclosing.journalId, fromTxnId, inProgressOk);
    // Update the http port, since we need this to build URLs to any of the
    // returned logs.
    this._enclosing.ConstructHttpServerURI(response);
    return PBHelper.Convert(response.GetManifest());
}
/// <summary>Round-trips a BlockKey through its protobuf form.</summary>
public virtual void TestConvertBlockKey()
{
    BlockKey original = GetBlockKey(1);
    HdfsProtos.BlockKeyProto asProto = PBHelper.Convert(original);
    BlockKey roundTripped = PBHelper.Convert(asProto);
    Compare(original, roundTripped);
}
/// <summary>Round-trips a Block through its protobuf form.</summary>
public virtual void TestConvertBlock()
{
    Block original = new Block(1, 100, 3);
    HdfsProtos.BlockProto asProto = PBHelper.Convert(original);
    Block roundTripped = PBHelper.Convert(asProto);
    NUnit.Framework.Assert.AreEqual(original, roundTripped);
}
/// <summary>RPC client stub: sends a heartbeat and translates the response's
/// commands, HA status, and rolling-upgrade status back into local types.</summary>
/// <exception cref="System.IO.IOException"/>
public virtual HeartbeatResponse SendHeartbeat(DatanodeRegistration registration, StorageReport[] reports, long cacheCapacity, long cacheUsed, int xmitsInProgress, int xceiverCount, int failedVolumes, VolumeFailureSummary volumeFailureSummary)
{
    DatanodeProtocolProtos.HeartbeatRequestProto.Builder request =
        DatanodeProtocolProtos.HeartbeatRequestProto.NewBuilder()
            .SetRegistration(PBHelper.Convert(registration))
            .SetXmitsInProgress(xmitsInProgress)
            .SetXceiverCount(xceiverCount)
            .SetFailedVolumes(failedVolumes);
    request.AddAllReports(PBHelper.ConvertStorageReports(reports));
    // Cache fields and the volume failure summary are only sent when meaningful.
    if (cacheCapacity != 0)
    {
        request.SetCacheCapacity(cacheCapacity);
    }
    if (cacheUsed != 0)
    {
        request.SetCacheUsed(cacheUsed);
    }
    if (volumeFailureSummary != null)
    {
        request.SetVolumeFailureSummary(PBHelper.ConvertVolumeFailureSummary(volumeFailureSummary));
    }
    DatanodeProtocolProtos.HeartbeatResponseProto resp;
    try
    {
        resp = rpcProxy.SendHeartbeat(NullController,
            ((DatanodeProtocolProtos.HeartbeatRequestProto)request.Build()));
    }
    catch (ServiceException se)
    {
        throw ProtobufHelper.GetRemoteException(se);
    }
    DatanodeCommand[] cmds = new DatanodeCommand[resp.GetCmdsList().Count];
    int index = 0;
    foreach (DatanodeProtocolProtos.DatanodeCommandProto cmdProto in resp.GetCmdsList())
    {
        cmds[index++] = PBHelper.Convert(cmdProto);
    }
    // Use v2 rolling-upgrade semantics when available, else fall back to v1.
    RollingUpgradeStatus rollingUpdateStatus = null;
    if (resp.HasRollingUpgradeStatusV2())
    {
        rollingUpdateStatus = PBHelper.Convert(resp.GetRollingUpgradeStatusV2());
    }
    else if (resp.HasRollingUpgradeStatus())
    {
        rollingUpdateStatus = PBHelper.Convert(resp.GetRollingUpgradeStatus());
    }
    return new HeartbeatResponse(cmds, PBHelper.Convert(resp.GetHaStatus()), rollingUpdateStatus);
}
/// <summary>Round-trips a DatanodeID through its protobuf form.</summary>
public virtual void TestConvertDatanodeID()
{
    DatanodeID original = DFSTestUtil.GetLocalDatanodeID();
    HdfsProtos.DatanodeIDProto asProto = PBHelper.Convert(original);
    DatanodeID roundTripped = PBHelper.Convert(asProto);
    Compare(original, roundTripped);
}
/// <summary>Round-trips a LocatedBlock (without storage media info) through its
/// protobuf form.</summary>
public virtual void TestConvertLocatedBlockNoStorageMedia()
{
    LocatedBlock original = CreateLocatedBlockNoStorageMedia();
    HdfsProtos.LocatedBlockProto asProto = PBHelper.Convert(original);
    LocatedBlock roundTripped = PBHelper.Convert(asProto);
    Compare(original, roundTripped);
}
/// <summary>Round-trips an AclStatus through its protobuf form and checks equality.</summary>
public virtual void TestAclStatusProto()
{
    AclEntry entry = new AclEntry.Builder()
        .SetName("test")
        .SetPermission(FsAction.ReadExecute)
        .SetScope(AclEntryScope.Default)
        .SetType(AclEntryType.Other)
        .Build();
    AclStatus status = new AclStatus.Builder()
        .Owner("foo")
        .Group("bar")
        .AddEntry(entry)
        .Build();
    NUnit.Framework.Assert.AreEqual(status, PBHelper.Convert(PBHelper.Convert(status)));
}
/// <summary>Round-trips a RemoteEditLog through its protobuf form.</summary>
public virtual void TestConvertRemoteEditLog()
{
    RemoteEditLog original = new RemoteEditLog(1, 100);
    HdfsProtos.RemoteEditLogProto asProto = PBHelper.Convert(original);
    RemoteEditLog roundTripped = PBHelper.Convert(asProto);
    Compare(original, roundTripped);
}
/// <summary>Round-trips a DatanodeStorage through its protobuf form.</summary>
public virtual void TestConvertDatanodeStorage()
{
    DatanodeStorage original =
        new DatanodeStorage("id1", DatanodeStorage.State.Normal, StorageType.Ssd);
    HdfsProtos.DatanodeStorageProto asProto = PBHelper.Convert(original);
    DatanodeStorage roundTripped = PBHelper.Convert(asProto);
    Compare(original, roundTripped);
}
/// <summary>Round-trips a block access token through its protobuf form.</summary>
public virtual void TestConvertBlockToken()
{
    Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> original =
        new Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier>(
            Sharpen.Runtime.GetBytesForString("identifier"),
            Sharpen.Runtime.GetBytesForString("password"),
            new Text("kind"),
            new Text("service"));
    SecurityProtos.TokenProto asProto = PBHelper.Convert(original);
    Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> roundTripped =
        PBHelper.Convert(asProto);
    Compare(original, roundTripped);
}
/// <summary>Builds the edit log manifest response, including this journal node's
/// HTTP endpoint so callers can fetch the listed logs.</summary>
/// <exception cref="System.IO.IOException"/>
public override QJournalProtocolProtos.GetEditLogManifestResponseProto GetEditLogManifest(string jid, long sinceTxId, bool inProgressOk)
{
    RemoteEditLogManifest manifest =
        jn.GetOrCreateJournal(jid).GetEditLogManifest(sinceTxId, inProgressOk);
    QJournalProtocolProtos.GetEditLogManifestResponseProto.Builder response =
        QJournalProtocolProtos.GetEditLogManifestResponseProto.NewBuilder();
    response.SetManifest(PBHelper.Convert(manifest));
    response.SetHttpPort(jn.GetBoundHttpAddress().Port);
    response.SetFromURL(jn.GetHttpServerURI());
    return (QJournalProtocolProtos.GetEditLogManifestResponseProto)response.Build();
}
/// <summary>Load FileDiff list for a file with snapshot feature</summary>
/// <param name="in">image stream positioned at the first delimited FileDiff record</param>
/// <param name="file">the file inode the diffs belong to</param>
/// <param name="size">number of FileDiff records to read</param>
/// <exception cref="System.IO.IOException"/>
private void LoadFileDiffList(InputStream @in, INodeFile file, int size)
{
    FileDiffList diffs = new FileDiffList();
    FSImageFormatProtobuf.LoaderContext state = parent.GetLoaderContext();
    for (int i = 0; i < size; i++)
    {
        FsImageProto.SnapshotDiffSection.FileDiff pbf = FsImageProto.SnapshotDiffSection.FileDiff
            .ParseDelimitedFrom(@in);
        // Rebuild the snapshot copy of the inode attributes, if one was recorded.
        INodeFileAttributes copy = null;
        if (pbf.HasSnapshotCopy())
        {
            FsImageProto.INodeSection.INodeFile fileInPb = pbf.GetSnapshotCopy();
            PermissionStatus permission = FSImageFormatPBINode.Loader.LoadPermission(
                fileInPb.GetPermission(), state.GetStringTable());
            AclFeature acl = null;
            if (fileInPb.HasAcl())
            {
                int[] entries = AclEntryStatusFormat.ToInt(FSImageFormatPBINode.Loader.LoadAclEntries(
                    fileInPb.GetAcl(), state.GetStringTable()));
                acl = new AclFeature(entries);
            }
            XAttrFeature xAttrs = null;
            if (fileInPb.HasXAttrs())
            {
                xAttrs = new XAttrFeature(FSImageFormatPBINode.Loader.LoadXAttrs(
                    fileInPb.GetXAttrs(), state.GetStringTable()));
            }
            copy = new INodeFileAttributes.SnapshotCopy(pbf.GetName().ToByteArray(), permission,
                acl, fileInPb.GetModificationTime(), fileInPb.GetAccessTime(),
                (short)fileInPb.GetReplication(), fileInPb.GetPreferredBlockSize(),
                unchecked((byte)fileInPb.GetStoragePolicyID()), xAttrs);
        }
        FileDiff diff = new FileDiff(pbf.GetSnapshotId(), copy, null, pbf.GetFileSize());
        IList<HdfsProtos.BlockProto> bpl = pbf.GetBlocksList();
        BlockInfoContiguous[] blocks = new BlockInfoContiguous[bpl.Count];
        // BUG FIX: the loop bound referenced an undeclared variable 'e' (the
        // Java original's "for (int j = 0, e = bpl.size(); ...)" lost the 'e'
        // declaration during conversion); bound on bpl.Count instead.
        for (int j = 0; j < bpl.Count; ++j)
        {
            Block blk = PBHelper.Convert(bpl[j]);
            BlockInfoContiguous storedBlock = fsn.GetBlockManager().GetStoredBlock(blk);
            // Register the block with the block manager if it is not yet known.
            if (storedBlock == null)
            {
                storedBlock = fsn.GetBlockManager().AddBlockCollection(
                    new BlockInfoContiguous(blk, copy.GetFileReplication()), file);
            }
            blocks[j] = storedBlock;
        }
        if (blocks.Length > 0)
        {
            diff.SetBlocks(blocks);
        }
        // Records are stored newest-first relative to the list: prepend each diff.
        diffs.AddFirst(diff);
    }
    file.AddSnapshotFeature(diffs);
}
/// <summary>Converts a ChecksumProto back into a DataChecksum; returns null for a
/// null proto.</summary>
public static DataChecksum FromProto(DataTransferProtos.ChecksumProto proto)
{
    if (proto == null)
    {
        return null;
    }
    DataChecksum.Type checksumType = PBHelper.Convert(proto.GetType());
    return DataChecksum.NewDataChecksum(checksumType, proto.GetBytesPerChecksum());
}
/// <summary>Round-trips ExportedBlockKeys through their protobuf form.</summary>
public virtual void TestConvertExportedBlockKeys()
{
    BlockKey[] keys = new BlockKey[] { GetBlockKey(2), GetBlockKey(3) };
    ExportedBlockKeys original = new ExportedBlockKeys(true, 9, 10, GetBlockKey(1), keys);
    HdfsProtos.ExportedBlockKeysProto asProto = PBHelper.Convert(original);
    ExportedBlockKeys roundTripped = PBHelper.Convert(asProto);
    Compare(original, roundTripped);
}
/// <summary>Formats the BookKeeper journal: deletes any existing ledgers and
/// ZooKeeper state, then recreates the base, version, and ledger znodes.</summary>
/// <exception cref="System.IO.IOException"/>
public override void Format(NamespaceInfo ns)
{
    try
    {
        // delete old info
        Stat baseStat = null;
        Stat ledgerStat = null;
        if ((baseStat = zkc.Exists(basePath, false)) != null)
        {
            if ((ledgerStat = zkc.Exists(ledgerPath, false)) != null)
            {
                foreach (EditLogLedgerMetadata ledger in GetLedgerList(true))
                {
                    try
                    {
                        bkc.DeleteLedger(ledger.GetLedgerId());
                    }
                    catch (BKException.BKNoSuchLedgerExistsException)
                    {
                        // Already gone; nothing to delete.
                        Log.Warn("Ledger " + ledger.GetLedgerId() + " does not exist;" + " Cannot delete.");
                    }
                }
            }
            ZKUtil.DeleteRecursive(zkc, basePath);
        }
        // should be clean now.
        zkc.Create(basePath, new byte[] { (byte)('0') }, ZooDefs.Ids.OpenAclUnsafe,
            CreateMode.Persistent);
        // Write the version znode with namespace info and layout version.
        BKJournalProtos.VersionProto.Builder versionBuilder =
            BKJournalProtos.VersionProto.NewBuilder();
        versionBuilder.SetNamespaceInfo(PBHelper.Convert(ns)).SetLayoutVersion(BkjmLayoutVersion);
        byte[] versionData = Sharpen.Runtime.GetBytesForString(
            TextFormat.PrintToString(((BKJournalProtos.VersionProto)versionBuilder.Build())),
            Charsets.Utf8);
        zkc.Create(versionPath, versionData, ZooDefs.Ids.OpenAclUnsafe, CreateMode.Persistent);
        zkc.Create(ledgerPath, new byte[] { (byte)('0') }, ZooDefs.Ids.OpenAclUnsafe,
            CreateMode.Persistent);
    }
    catch (KeeperException ke)
    {
        Log.Error("Error accessing zookeeper to format", ke);
        throw new IOException("Error accessing zookeeper to format", ke);
    }
    catch (Exception ie)
    {
        // Restore the interrupt flag before surfacing the failure.
        Sharpen.Thread.CurrentThread().Interrupt();
        throw new IOException("Interrupted during format", ie);
    }
    catch (BKException bke)
    {
        throw new IOException("Error cleaning up ledgers during format", bke);
    }
}