/// <summary>Calculates mask entries required for the ACL.</summary>
/// <remarks>
/// Calculates mask entries required for the ACL. Mask calculation is performed
/// separately for each scope: access and default. This method is responsible
/// for handling the following cases of mask calculation:
/// 1. Throws an exception if the caller attempts to remove the mask entry of an
/// existing ACL that requires it. If the ACL has any named entries, then a
/// mask entry is required.
/// 2. If the caller supplied a mask in the ACL spec, use it.
/// 3. If the caller did not supply a mask, but there are ACL entry changes in
/// this scope, then automatically calculate a new mask. The permissions of
/// the new mask are the union of the permissions on the group entry and all
/// named entries.
/// </remarks>
/// <param name="aclBuilder">ArrayList<AclEntry> containing entries to build</param>
/// <param name="providedMask">
/// EnumMap<AclEntryScope, AclEntry> mapping each scope to
/// the mask entry that was provided for that scope (if provided)
/// </param>
/// <param name="maskDirty">
/// EnumSet<AclEntryScope> which contains a scope if the mask
/// entry is dirty (added or deleted) in that scope
/// </param>
/// <param name="scopeDirty">
/// EnumSet<AclEntryScope> which contains a scope if any entry
/// is dirty (added or deleted) in that scope
/// </param>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.AclException">if validation fails
/// </exception>
private static void CalculateMasks(IList<AclEntry> aclBuilder, EnumMap<AclEntryScope, AclEntry> providedMask, EnumSet<AclEntryScope> maskDirty, EnumSet<AclEntryScope> scopeDirty)
{
    // Scopes that have at least one entry of any kind.
    EnumSet<AclEntryScope> scopeFound = EnumSet.NoneOf<AclEntryScope>();
    // Per-scope union of the "group class" permissions (group entry + all named
    // entries); this becomes the permission of any automatically calculated mask.
    EnumMap<AclEntryScope, FsAction> unionPerms = Maps.NewEnumMap<AclEntryScope>();
    // Scopes that contain named entries and therefore require a mask entry.
    EnumSet<AclEntryScope> maskNeeded = EnumSet.NoneOf<AclEntryScope>();
    // Determine which scopes are present, which scopes need a mask, and the
    // union of group class permissions in each scope.
    foreach (AclEntry entry in aclBuilder)
    {
        scopeFound.AddItem(entry.GetScope());
        if (entry.GetType() == AclEntryType.Group || entry.GetName() != null)
        {
            // Accumulate the union, treating an absent entry as FsAction.None.
            FsAction scopeUnionPerms = Objects.FirstNonNull(unionPerms[entry.GetScope()], FsAction.None);
            unionPerms[entry.GetScope()] = scopeUnionPerms.Or(entry.GetPermission());
        }
        if (entry.GetName() != null)
        {
            // A named entry forces the scope to carry a mask entry.
            maskNeeded.AddItem(entry.GetScope());
        }
    }
    // Add mask entry if needed in each scope.
    foreach (AclEntryScope scope in scopeFound)
    {
        if (!providedMask.Contains(scope) && maskNeeded.Contains(scope) && maskDirty.Contains(scope))
        {
            // Caller explicitly removed mask entry, but it's required.
            throw new AclException("Invalid ACL: mask is required and cannot be deleted.");
        }
        else
        {
            if (providedMask.Contains(scope) && (!scopeDirty.Contains(scope) || maskDirty.Contains(scope)))
            {
                // Caller explicitly provided new mask, or we are preserving the existing
                // mask in an unchanged scope.
                aclBuilder.AddItem(providedMask[scope]);
            }
            else
            {
                if (maskNeeded.Contains(scope) || providedMask.Contains(scope))
                {
                    // Otherwise, if there are maskable entries present, or the ACL
                    // previously had a mask, then recalculate a mask automatically
                    // from the accumulated group-class union.
                    aclBuilder.AddItem(new AclEntry.Builder().SetScope(scope).SetType(AclEntryType.Mask).SetPermission(unionPerms[scope]).Build());
                }
            }
        }
    }
}
/// <summary>
/// Builds a copy of <paramref name="entity"/> that exposes only the requested
/// <paramref name="fields"/>; everything not requested is set to null.
/// </summary>
/// <param name="entity">the source timeline entity</param>
/// <param name="fields">the set of fields to retain in the returned copy</param>
/// <returns>a new TimelineEntity containing only the selected fields</returns>
private static TimelineEntity MaskFields(TimelineEntity entity, EnumSet<TimelineReader.Field> fields)
{
    // Conceal the fields that are not going to be exposed
    TimelineEntity masked = new TimelineEntity();
    // Identity fields are always copied.
    masked.SetEntityId(entity.GetEntityId());
    masked.SetEntityType(entity.GetEntityType());
    masked.SetStartTime(entity.GetStartTime());
    masked.SetDomainId(entity.GetDomainId());
    // Deep copy of the optional fields, gated on the requested field set.
    if (fields.Contains(TimelineReader.Field.Events))
    {
        masked.AddEvents(entity.GetEvents());
    }
    else if (fields.Contains(TimelineReader.Field.LastEventOnly))
    {
        // Only the most recent event is exposed.
        masked.AddEvent(entity.GetEvents()[0]);
    }
    else
    {
        masked.SetEvents(null);
    }
    if (fields.Contains(TimelineReader.Field.RelatedEntities))
    {
        masked.AddRelatedEntities(entity.GetRelatedEntities());
    }
    else
    {
        masked.SetRelatedEntities(null);
    }
    if (fields.Contains(TimelineReader.Field.PrimaryFilters))
    {
        masked.AddPrimaryFilters(entity.GetPrimaryFilters());
    }
    else
    {
        masked.SetPrimaryFilters(null);
    }
    if (fields.Contains(TimelineReader.Field.OtherInfo))
    {
        masked.AddOtherInfo(entity.GetOtherInfo());
    }
    else
    {
        masked.SetOtherInfo(null);
    }
    return masked;
}
/// <summary>Validate the CreateFlag and throw exception if it is invalid</summary>
/// <param name="flag">set of CreateFlag</param>
/// <exception cref="Org.Apache.Hadoop.HadoopIllegalArgumentException">if the CreateFlag is invalid
/// </exception>
public static void Validate(EnumSet<Org.Apache.Hadoop.FS.CreateFlag> flag)
{
    // An absent or empty flag set carries no creation semantics at all.
    if (flag == null || flag.IsEmpty())
    {
        throw new HadoopIllegalArgumentException(flag + " does not specify any options");
    }
    bool append = flag.Contains(Org.Apache.Hadoop.FS.CreateFlag.Append);
    bool overwrite = flag.Contains(Org.Apache.Hadoop.FS.CreateFlag.Overwrite);
    // Both append and overwrite is an error.
    // Fix: the original message was concatenated directly onto the flag set
    // ("[...]Both append...") with no separator; add one so the message is readable.
    if (append && overwrite)
    {
        throw new HadoopIllegalArgumentException(flag + ": Both append and overwrite options cannot be enabled.");
    }
}
/// <summary>Get or create a memory map for this replica.</summary>
/// <remarks>
/// Get or create a memory map for this replica.
/// There are two kinds of ClientMmap objects we could fetch here: one that
/// will always read pre-checksummed data, and one that may read data that
/// hasn't been checksummed.
/// If we fetch the former, "safe" kind of ClientMmap, we have to increment
/// the anchor count on the shared memory slot. This will tell the DataNode
/// not to munlock the block until this ClientMmap is closed.
/// If we fetch the latter, we don't bother with anchoring.
/// </remarks>
/// <param name="opts">The options to use, such as SKIP_CHECKSUMS.</param>
/// <returns>null on failure; the ClientMmap otherwise.</returns>
public virtual ClientMmap GetClientMmap(EnumSet<ReadOption> opts)
{
    // Anchor only when we verify checksums and the caller did NOT ask to skip them.
    bool anchor = verifyChecksum && (opts.Contains(ReadOption.SkipChecksums) == false);
    if (anchor)
    {
        // Anchoring requires a no-checksum context; if we can't get one,
        // the mmap cannot be provided safely.
        if (!CreateNoChecksumContext())
        {
            if (Log.IsTraceEnabled())
            {
                Log.Trace("can't get an mmap for " + block + " of " + filename + " since SKIP_CHECKSUMS was not given, " + "we aren't skipping checksums, and the block is not mlocked.");
            }
            return (null);
        }
    }
    ClientMmap clientMmap = null;
    try
    {
        clientMmap = replica.GetOrCreateClientMmap(anchor);
    }
    finally
    {
        // On failure, release the context acquired above so the anchor
        // count is not leaked.
        if ((clientMmap == null) && anchor)
        {
            ReleaseNoChecksumContext();
        }
    }
    return (clientMmap);
}
/// <summary>
/// Returns the cluster nodes whose state is in <paramref name="acceptedStates"/>.
/// Active states (NEW, RUNNING, UNHEALTHY) are looked up in the active node map;
/// inactive states (DECOMMISSIONED, LOST, REBOOTED) in the inactive node map.
/// </summary>
/// <param name="context">the RM context holding both node maps</param>
/// <param name="acceptedStates">the node states to match</param>
/// <returns>the list of matching nodes</returns>
public static IList<RMNode> QueryRMNodes(RMContext context, EnumSet<NodeState> acceptedStates)
{
    AList<RMNode> matched = new AList<RMNode>();
    // The active map only holds nodes that are NEW, RUNNING or UNHEALTHY,
    // so skip the scan entirely unless one of those states was requested.
    bool scanActive = acceptedStates.Contains(NodeState.New) || acceptedStates.Contains(NodeState.Running) || acceptedStates.Contains(NodeState.Unhealthy);
    if (scanActive)
    {
        foreach (RMNode node in context.GetRMNodes().Values)
        {
            if (acceptedStates.Contains(node.GetState()))
            {
                matched.AddItem(node);
            }
        }
    }
    // The inactive map only holds nodes that are DECOMMISSIONED, LOST or REBOOTED.
    bool scanInactive = acceptedStates.Contains(NodeState.Decommissioned) || acceptedStates.Contains(NodeState.Lost) || acceptedStates.Contains(NodeState.Rebooted);
    if (scanInactive)
    {
        foreach (RMNode node in context.GetInactiveRMNodes().Values)
        {
            if (acceptedStates.Contains(node.GetState()))
            {
                matched.AddItem(node);
            }
        }
    }
    return matched;
}
/// <summary>Validate the CreateFlag for the append operation.</summary>
/// <remarks>
/// Validate the CreateFlag for the append operation. The flag must contain
/// APPEND, and cannot contain OVERWRITE.
/// </remarks>
public static void ValidateForAppend(EnumSet<Org.Apache.Hadoop.FS.CreateFlag> flag)
{
    // General validation first (non-empty, no APPEND+OVERWRITE combination).
    Validate(flag);
    bool hasAppend = flag.Contains(Org.Apache.Hadoop.FS.CreateFlag.Append);
    if (!hasAppend)
    {
        throw new HadoopIllegalArgumentException(flag + " does not contain APPEND");
    }
}
/// <summary>
/// Runs a small sleep job on the mini cluster, waits for the application to
/// reach a terminal state, and verifies that the Job History Server reports
/// matching counters and a valid job report.
/// </summary>
/// <exception cref="System.IO.IOException"/>
/// <exception cref="System.Exception"/>
/// <exception cref="Org.Apache.Avro.AvroRemoteException"/>
/// <exception cref="System.TypeLoadException"/>
public virtual void TestJobHistoryData()
{
    // The test needs the MR AppMaster jar; skip (do not fail) when it is absent.
    if (!(new FilePath(MiniMRYarnCluster.Appjar)).Exists())
    {
        Log.Info("MRAppJar " + MiniMRYarnCluster.Appjar + " not found. Not running test.");
        return;
    }
    SleepJob sleepJob = new SleepJob();
    sleepJob.SetConf(mrCluster.GetConfig());
    // Job with 3 maps and 2 reduces
    Job job = sleepJob.CreateJob(3, 2, 1000, 1, 500, 1);
    job.SetJarByClass(typeof(SleepJob));
    job.AddFileToClassPath(AppJar);
    // The AppMaster jar itself.
    job.WaitForCompletion(true);
    Counters counterMR = job.GetCounters();
    JobId jobId = TypeConverter.ToYarn(job.GetJobID());
    ApplicationId appID = jobId.GetAppId();
    // Poll once a second, for at most 60 seconds, until the RM reports the
    // application in a terminal state.
    int pollElapsed = 0;
    while (true)
    {
        Sharpen.Thread.Sleep(1000);
        pollElapsed += 1000;
        if (TerminalRmAppStates.Contains(mrCluster.GetResourceManager().GetRMContext().GetRMApps()[appID].GetState()))
        {
            break;
        }
        if (pollElapsed >= 60000)
        {
            Log.Warn("application did not reach terminal state within 60 seconds");
            break;
        }
    }
    NUnit.Framework.Assert.AreEqual(RMAppState.Finished, mrCluster.GetResourceManager().GetRMContext().GetRMApps()[appID].GetState());
    // Counters fetched after completion should come from the history server
    // and must match the ones recorded at job completion.
    Counters counterHS = job.GetCounters();
    //TODO the Assert below worked. need to check
    //Should we compare each field or convert to V2 counter and compare
    Log.Info("CounterHS " + counterHS);
    Log.Info("CounterMR " + counterMR);
    NUnit.Framework.Assert.AreEqual(counterHS, counterMR);
    // Finally query the Job History Server directly for the job report.
    HSClientProtocol historyClient = InstantiateHistoryProxy();
    GetJobReportRequest gjReq = Org.Apache.Hadoop.Yarn.Util.Records.NewRecord<GetJobReportRequest>();
    gjReq.SetJobId(jobId);
    JobReport jobReport = historyClient.GetJobReport(gjReq).GetJobReport();
    VerifyJobReport(jobReport, jobId);
}
/// <summary>
/// Computes how many containers of size <paramref name="factor"/> are needed to
/// cover <paramref name="required"/>. When CPU is a scheduled resource type the
/// result is the max of the memory-based and vcore-based counts; otherwise only
/// memory is considered.
/// </summary>
public static int DivideAndCeilContainers(Resource required, Resource factor, EnumSet<YarnServiceProtos.SchedulerResourceTypes> resourceTypes)
{
    int byMemory = DivideAndCeil(required.GetMemory(), factor.GetMemory());
    if (!resourceTypes.Contains(YarnServiceProtos.SchedulerResourceTypes.Cpu))
    {
        // Memory-only scheduling.
        return byMemory;
    }
    int byVirtualCores = DivideAndCeil(required.GetVirtualCores(), factor.GetVirtualCores());
    // The dominant (larger) requirement decides the container count.
    return Math.Max(byMemory, byVirtualCores);
}
/// <summary>
/// Computes how many containers of size <paramref name="required"/> fit into
/// <paramref name="available"/>. When CPU is a scheduled resource type the
/// result is the min of the memory-based and vcore-based counts; otherwise only
/// memory is considered.
/// </summary>
public static int ComputeAvailableContainers(Resource available, Resource required, EnumSet<YarnServiceProtos.SchedulerResourceTypes> resourceTypes)
{
    int byMemory = available.GetMemory() / required.GetMemory();
    if (resourceTypes.Contains(YarnServiceProtos.SchedulerResourceTypes.Cpu))
    {
        // The scarcer (smaller) resource limits the number of containers.
        int byVirtualCores = available.GetVirtualCores() / required.GetVirtualCores();
        return Math.Min(byMemory, byVirtualCores);
    }
    return byMemory;
}
/// <summary>
/// Creates an output stream for <paramref name="f"/> without creating missing
/// parent directories, failing if the file exists and OVERWRITE was not given.
/// </summary>
public override FSDataOutputStream CreateNonRecursive(Path f, FsPermission permission, EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize, Progressable progress)
{
    // Check existence first (matches the original evaluation order), then the flag.
    if (Exists(f))
    {
        bool mayOverwrite = flags.Contains(CreateFlag.Overwrite);
        if (!mayOverwrite)
        {
            throw new FileAlreadyExistsException("File already exists: " + f);
        }
    }
    // Wrap the raw stream in a buffered stream of the requested size.
    BufferedOutputStream buffered = new BufferedOutputStream(CreateOutputStreamWithMode(f, false, permission), bufferSize);
    return new FSDataOutputStream(buffered, statistics);
}
/// <summary>
/// Returns the first storage type from <paramref name="fallbacks"/> that is not
/// in <paramref name="unavailables"/>, or null if every fallback is unavailable.
/// </summary>
private static StorageType GetFallback(EnumSet<StorageType> unavailables, StorageType[] fallbacks)
{
    for (int i = 0; i < fallbacks.Length; i++)
    {
        StorageType candidate = fallbacks[i];
        if (!unavailables.Contains(candidate))
        {
            return candidate;
        }
    }
    // No usable fallback.
    return null;
}
/// <summary>
/// Renders the applications table: emits the table header via the Hamlet DSL
/// and builds the row data as a JavaScript array literal consumed by the
/// client-side DataTables initialization.
/// </summary>
protected internal virtual void RenderData(HtmlBlock.Block html)
{
    // Emit the table skeleton and header row; the Sharpen-converted ".()" calls
    // correspond to Java Hamlet's "_()" element-close calls.
    Hamlet.TBODY<Hamlet.TABLE<Org.Apache.Hadoop.Yarn.Webapp.Hamlet.Hamlet>> tbody = html.Table("#apps").Thead().Tr().Th(".id", "ID").Th(".user", "User").Th(".name", "Name").Th(".type", "Application Type").Th(".queue", "Queue").Th(".starttime", "StartTime").Th(".finishtime", "FinishTime").Th(".state", "State").Th(".finalstatus", "FinalStatus").Th(".progress", "Progress").Th(".ui", "Tracking UI").().().Tbody();
    // Row data is accumulated as a JS array-of-arrays literal.
    StringBuilder appsTableData = new StringBuilder("[\n");
    foreach (ApplicationReport appReport in appReports)
    {
        // TODO: remove the following condition. It is still here because
        // the history side implementation of ApplicationBaseProtocol
        // hasn't filtering capability (YARN-1819).
        if (!reqAppStates.IsEmpty() && !reqAppStates.Contains(appReport.GetYarnApplicationState()))
        {
            continue;
        }
        AppInfo app = new AppInfo(appReport);
        // NOTE(review): "%.1f" is a Java format string — presumably handled by the
        // Sharpen runtime's string.Format shim; confirm it is not the BCL Format.
        string percent = string.Format("%.1f", app.GetProgress());
        // One JS row: id link, escaped user/name/type/queue, times, states, and
        // an inline jQuery-UI progress bar fragment.
        appsTableData.Append("[\"<a href='").Append(Url("app", app.GetAppId())).Append("'>").Append(app.GetAppId()).Append("</a>\",\"").Append(StringEscapeUtils.EscapeJavaScript(StringEscapeUtils.EscapeHtml(app.GetUser()))).Append("\",\"").Append(StringEscapeUtils.EscapeJavaScript(StringEscapeUtils.EscapeHtml(app.GetName()))).Append("\",\"").Append(StringEscapeUtils.EscapeJavaScript(StringEscapeUtils.EscapeHtml(app.GetType()))).Append("\",\"").Append(StringEscapeUtils.EscapeJavaScript(StringEscapeUtils.EscapeHtml(app.GetQueue()))).Append("\",\"").Append(app.GetStartedTime()).Append("\",\"").Append(app.GetFinishedTime()).Append("\",\"").Append(app.GetAppState() == null ? Unavailable : app.GetAppState()).Append("\",\"").Append(app.GetFinalAppStatus()).Append("\",\"").Append("<br title='").Append(percent).Append("'> <div class='").Append(JQueryUI.CProgressbar).Append("' title='").Append(StringHelper.Join(percent, '%')).Append("'> ").Append("<div class='").Append(JQueryUI.CProgressbarValue).Append("' style='").Append(StringHelper.Join("width:", percent, '%')).Append("'> </div> </div>").Append("\",\"<a ");
        // Progress bar
        // Tracking link: "#" when no URL; label depends on whether the app finished.
        string trackingURL = app.GetTrackingUrl() == null || app.GetTrackingUrl().Equals(Unavailable) ? null : app.GetTrackingUrl();
        string trackingUI = app.GetTrackingUrl() == null || app.GetTrackingUrl().Equals(Unavailable) ? "Unassigned" : app.GetAppState() == YarnApplicationState.Finished || app.GetAppState() == YarnApplicationState.Failed || app.GetAppState() == YarnApplicationState.Killed ? "History" : "ApplicationMaster";
        appsTableData.Append(trackingURL == null ? "#" : "href='" + trackingURL).Append("'>").Append(trackingUI).Append("</a>\"],\n");
    }
    // Strip the trailing ",\n" after the last row so the JS literal stays valid.
    if (appsTableData[appsTableData.Length - 2] == ',')
    {
        appsTableData.Delete(appsTableData.Length - 2, appsTableData.Length - 1);
    }
    appsTableData.Append("]");
    // Hand the row data to the page as a script variable, then close the table.
    html.Script().$type("text/javascript").("var appsTableData=" + appsTableData).();
    tbody.().();
}
/// <summary>
/// Serializes this sattr3 structure to XDR. Each attribute is written as a
/// presence boolean followed by its value only when the corresponding field is
/// present in updateFields (NFSv3 optional-attribute encoding).
/// </summary>
public virtual void Serialize(XDR xdr)
{
    if (updateFields.Contains(SetAttr3.SetAttrField.Mode))
    {
        xdr.WriteBoolean(true);
        xdr.WriteInt(mode);
    }
    else
    {
        xdr.WriteBoolean(false);
    }
    if (updateFields.Contains(SetAttr3.SetAttrField.Uid))
    {
        xdr.WriteBoolean(true);
        xdr.WriteInt(uid);
    }
    else
    {
        xdr.WriteBoolean(false);
    }
    if (updateFields.Contains(SetAttr3.SetAttrField.Gid))
    {
        xdr.WriteBoolean(true);
        xdr.WriteInt(gid);
    }
    else
    {
        xdr.WriteBoolean(false);
    }
    if (updateFields.Contains(SetAttr3.SetAttrField.Size))
    {
        xdr.WriteBoolean(true);
        // size is a 64-bit value, written as an XDR hyper.
        xdr.WriteLongAsHyper(size);
    }
    else
    {
        xdr.WriteBoolean(false);
    }
    if (updateFields.Contains(SetAttr3.SetAttrField.Atime))
    {
        xdr.WriteBoolean(true);
        atime.Serialize(xdr);
    }
    else
    {
        xdr.WriteBoolean(false);
    }
    if (updateFields.Contains(SetAttr3.SetAttrField.Mtime))
    {
        xdr.WriteBoolean(true);
        mtime.Serialize(xdr);
    }
    else
    {
        xdr.WriteBoolean(false);
    }
}
/// <summary>Validate the CreateFlag for create operation</summary>
/// <param name="path">
/// Object representing the path; usually String or
/// <see cref="Path"/>
/// </param>
/// <param name="pathExists">pass true if the path exists in the file system</param>
/// <param name="flag">set of CreateFlag</param>
/// <exception cref="System.IO.IOException">on error</exception>
/// <exception cref="Org.Apache.Hadoop.HadoopIllegalArgumentException">if the CreateFlag is invalid
/// </exception>
public static void Validate(object path, bool pathExists, EnumSet<Org.Apache.Hadoop.FS.CreateFlag> flag)
{
    // Basic flag-set validation first.
    Validate(flag);
    bool append = flag.Contains(Org.Apache.Hadoop.FS.CreateFlag.Append);
    bool overwrite = flag.Contains(Org.Apache.Hadoop.FS.CreateFlag.Overwrite);
    // Existing file: must either append to or overwrite it.
    if (pathExists && !(append || overwrite))
    {
        throw new FileAlreadyExistsException("File already exists: " + path.ToString() + ". Append or overwrite option must be specified in " + flag);
    }
    // Missing file: CREATE must be requested.
    if (!pathExists && !flag.Contains(Org.Apache.Hadoop.FS.CreateFlag.Create))
    {
        throw new FileNotFoundException("Non existing file: " + path.ToString() + ". Create option is not specified in " + flag);
    }
}
/// <summary>
/// Validates an XAttr set-flag combination: REPLACE is required when the
/// attribute already exists, CREATE when it does not.
/// </summary>
/// <param name="xAttrName">name of the extended attribute (used in messages)</param>
/// <param name="xAttrExists">whether the attribute currently exists</param>
/// <param name="flag">the set of XAttrSetFlag values supplied by the caller</param>
/// <exception cref="System.IO.IOException"/>
public static void Validate(string xAttrName, bool xAttrExists, EnumSet<Org.Apache.Hadoop.FS.XAttrSetFlag> flag)
{
    // At least one flag must be given.
    if (flag == null || flag.IsEmpty())
    {
        throw new HadoopIllegalArgumentException("A flag must be specified.");
    }
    if (xAttrExists && !flag.Contains(Org.Apache.Hadoop.FS.XAttrSetFlag.Replace))
    {
        throw new IOException("XAttr: " + xAttrName + " already exists. The REPLACE flag must be specified.");
    }
    if (!xAttrExists && !flag.Contains(Org.Apache.Hadoop.FS.XAttrSetFlag.Create))
    {
        throw new IOException("XAttr: " + xAttrName + " does not exist. The CREATE flag must be specified.");
    }
}
/// <summary>Completely replaces the ACL with the entries of the ACL spec.</summary>
/// <remarks>
/// Completely replaces the ACL with the entries of the ACL spec. If
/// necessary, recalculates the mask entries. If necessary, default entries
/// are inferred by copying the permissions of the corresponding access
/// entries. Replacement occurs separately for each of the access ACL and the
/// default ACL. If the ACL spec contains only access entries, then the
/// existing default entries are retained. If the ACL spec contains only
/// default entries, then the existing access entries are retained. If the ACL
/// spec contains both access and default entries, then both are replaced.
/// </remarks>
/// <param name="existingAcl">List<AclEntry> existing ACL</param>
/// <param name="inAclSpec">List<AclEntry> ACL spec containing replacement entries</param>
/// <returns>List<AclEntry> new ACL</returns>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.AclException">if validation fails
/// </exception>
public static IList<AclEntry> ReplaceAclEntries(IList<AclEntry> existingAcl, IList<AclEntry> inAclSpec)
{
    AclTransformation.ValidatedAclSpec aclSpec = new AclTransformation.ValidatedAclSpec(inAclSpec);
    AList<AclEntry> aclBuilder = Lists.NewArrayListWithCapacity(MaxEntries);
    // Replacement is done separately for each scope: access and default.
    // providedMask: mask entry supplied (or retained) per scope.
    // maskDirty: scopes whose mask entry was explicitly provided in the spec.
    // scopeDirty: scopes that the spec touches at all (these get replaced).
    EnumMap<AclEntryScope, AclEntry> providedMask = Maps.NewEnumMap<AclEntryScope>();
    EnumSet<AclEntryScope> maskDirty = EnumSet.NoneOf<AclEntryScope>();
    EnumSet<AclEntryScope> scopeDirty = EnumSet.NoneOf<AclEntryScope>();
    foreach (AclEntry aclSpecEntry in aclSpec)
    {
        scopeDirty.AddItem(aclSpecEntry.GetScope());
        if (aclSpecEntry.GetType() == AclEntryType.Mask)
        {
            // Mask entries are tracked separately; CalculateMasks decides
            // whether and where they are inserted.
            providedMask[aclSpecEntry.GetScope()] = aclSpecEntry;
            maskDirty.AddItem(aclSpecEntry.GetScope());
        }
        else
        {
            aclBuilder.AddItem(aclSpecEntry);
        }
    }
    // Copy existing entries if the scope was not replaced.
    foreach (AclEntry existingEntry in existingAcl)
    {
        if (!scopeDirty.Contains(existingEntry.GetScope()))
        {
            if (existingEntry.GetType() == AclEntryType.Mask)
            {
                // Retained scope keeps its existing mask.
                providedMask[existingEntry.GetScope()] = existingEntry;
            }
            else
            {
                aclBuilder.AddItem(existingEntry);
            }
        }
    }
    // Infer default entries from access entries where required, then insert
    // mask entries and validate the final ACL.
    CopyDefaultsIfNeeded(aclBuilder);
    CalculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
    return (BuildAndValidateAcl(aclBuilder));
}
/// <summary>
/// Translates a FileContext-style create call (CreateFlag set plus varargs
/// CreateOpts) into the FileSystem-style create call on the wrapped fs,
/// filling in defaults for every option that was not supplied.
/// </summary>
/// <exception cref="Org.Apache.Hadoop.Security.AccessControlException"/>
/// <exception cref="Org.Apache.Hadoop.FS.FileAlreadyExistsException"/>
/// <exception cref="System.IO.FileNotFoundException"/>
/// <exception cref="Org.Apache.Hadoop.FS.ParentNotDirectoryException"/>
/// <exception cref="Org.Apache.Hadoop.FS.UnsupportedFileSystemException"/>
/// <exception cref="System.IO.IOException"/>
public override FSDataOutputStream Create(Path f, EnumSet<CreateFlag> createFlag, params Options.CreateOpts[] opts)
{
    // Need to translate the FileContext-style options into FileSystem-style
    // Permissions with umask
    Options.CreateOpts.Perms permOpt = Options.CreateOpts.GetOpt<Options.CreateOpts.Perms>(opts);
    FsPermission umask = FsPermission.GetUMask(fs.GetConf());
    FsPermission permission = (permOpt != null) ? permOpt.GetValue() : FsPermission.GetFileDefault().ApplyUMask(umask);
    // Applying the umask twice on the default path is harmless: masking is
    // idempotent (p & ~u & ~u == p & ~u).
    permission = permission.ApplyUMask(umask);
    // Overwrite
    bool overwrite = createFlag.Contains(CreateFlag.Overwrite);
    // bufferSize: configured value unless an explicit BufferSize opt is given.
    int bufferSize = fs.GetConf().GetInt(CommonConfigurationKeysPublic.IoFileBufferSizeKey, CommonConfigurationKeysPublic.IoFileBufferSizeDefault);
    Options.CreateOpts.BufferSize bufOpt = Options.CreateOpts.GetOpt<Options.CreateOpts.BufferSize>(opts);
    bufferSize = (bufOpt != null) ? bufOpt.GetValue() : bufferSize;
    // replication: per-path default unless a ReplicationFactor opt is given.
    short replication = fs.GetDefaultReplication(f);
    Options.CreateOpts.ReplicationFactor repOpt = Options.CreateOpts.GetOpt<Options.CreateOpts.ReplicationFactor>(opts);
    replication = (repOpt != null) ? repOpt.GetValue() : replication;
    // blockSize: per-path default unless a BlockSize opt is given.
    long blockSize = fs.GetDefaultBlockSize(f);
    Options.CreateOpts.BlockSize blockOpt = Options.CreateOpts.GetOpt<Options.CreateOpts.BlockSize>(opts);
    blockSize = (blockOpt != null) ? blockOpt.GetValue() : blockSize;
    // Progressable: null unless a Progress opt is given.
    Progressable progress = null;
    Options.CreateOpts.Progress progressOpt = Options.CreateOpts.GetOpt<Options.CreateOpts.Progress>(opts);
    progress = (progressOpt != null) ? progressOpt.GetValue() : progress;
    return (fs.Create(f, permission, overwrite, bufferSize, replication, blockSize, progress));
}
/// <summary>
/// Choose the storage types for storing the remaining replicas, given the
/// replication number, the storage types of the chosen replicas and
/// the unavailable storage types.
/// </summary>
/// <remarks>
/// Choose the storage types for storing the remaining replicas, given the
/// replication number, the storage types of the chosen replicas and
/// the unavailable storage types. It uses fallback storage in case that
/// the desired storage type is unavailable.
/// </remarks>
/// <param name="replication">the replication number.</param>
/// <param name="chosen">the storage types of the chosen replicas.</param>
/// <param name="unavailables">the unavailable storage types.</param>
/// <param name="isNewBlock">Is it for new block creation?</param>
/// <returns>
/// a list of
/// <see cref="Org.Apache.Hadoop.FS.StorageType"/>
/// s for storing the replicas of a block.
/// </returns>
public virtual IList<StorageType> ChooseStorageTypes(short replication, IEnumerable<StorageType> chosen, EnumSet<StorageType> unavailables, bool isNewBlock)
{
    // First pick the desired types ignoring availability; "excess" collects
    // types beyond what is strictly required.
    IList<StorageType> excess = new List<StorageType>();
    IList<StorageType> storageTypes = ChooseStorageTypes(replication, chosen, excess);
    int expectedSize = storageTypes.Count - excess.Count;
    IList<StorageType> removed = new List<StorageType>();
    // Iterate backwards so removing by index does not disturb unvisited entries.
    for (int i = storageTypes.Count - 1; i >= 0; i--)
    {
        // replace/remove unavailable storage types.
        StorageType t = storageTypes[i];
        if (unavailables.Contains(t))
        {
            // Creation and replication use different fallback chains.
            StorageType fallback = isNewBlock ? GetCreationFallback(unavailables) : GetReplicationFallback(unavailables);
            if (fallback == null)
            {
                // No usable fallback: drop the entry and remember it for the warning.
                removed.AddItem(storageTypes.Remove(i));
            }
            else
            {
                storageTypes.Set(i, fallback);
            }
        }
    }
    // remove excess storage types after fallback replacement.
    Diff(storageTypes, excess, null);
    if (storageTypes.Count < expectedSize)
    {
        Log.Warn("Failed to place enough replicas: expected size is " + expectedSize + " but only " + storageTypes.Count + " storage types can be selected " + "(replication=" + replication + ", selected=" + storageTypes + ", unavailable=" + unavailables + ", removed=" + removed + ", policy=" + this + ")");
    }
    return (storageTypes);
}
/// <summary>
/// Reports whether the given application currently exists and is in one of the
/// active YARN states. Returns false when the RM does not know the application.
/// </summary>
/// <param name="id">the application to query</param>
/// <returns>true iff a report exists and its state is in ActiveStates</returns>
public override bool IsApplicationActive(ApplicationId id)
{
    ApplicationReport report;
    try
    {
        report = client.GetApplicationReport(id);
    }
    catch (ApplicationNotFoundException)
    {
        // the app does not exist
        return false;
    }
    catch (IOException e)
    {
        // Communication failure is surfaced to the caller.
        throw new YarnException(e);
    }
    if (report == null)
    {
        // the app does not exist
        return false;
    }
    return ActiveStates.Contains(report.GetYarnApplicationState());
}
/// <summary>
/// Synchronizes toolbar button visibility with the bound list model's allowed
/// actions, or with the sheet's own button set when no model is attached.
/// </summary>
private void UpdateButtons()
{
    if (fListModel != null)
    {
        // A model is attached: it dictates which record actions are allowed.
        EnumSet<RecordAction> allowed = fListModel.AllowedActions;
        fBtnAdd.Visible = allowed.Contains(RecordAction.raAdd);
        fBtnDelete.Visible = allowed.Contains(RecordAction.raDelete);
        fBtnEdit.Visible = allowed.Contains(RecordAction.raEdit);
        fBtnLinkJump.Visible = allowed.Contains(RecordAction.raJump);
        fBtnMoveUp.Visible = allowed.Contains(RecordAction.raMoveUp);
        fBtnMoveDown.Visible = allowed.Contains(RecordAction.raMoveDown);
        //fToolBar.Visible = !allowedActions.IsEmpty();
    }
    else
    {
        // No model: fall back to the statically configured button set.
        fBtnAdd.Visible = fButtons.Contains(SheetButton.lbAdd);
        fBtnDelete.Visible = fButtons.Contains(SheetButton.lbDelete);
        fBtnEdit.Visible = fButtons.Contains(SheetButton.lbEdit);
        fBtnLinkJump.Visible = fButtons.Contains(SheetButton.lbJump);
        fBtnMoveUp.Visible = fButtons.Contains(SheetButton.lbMoveUp);
        fBtnMoveDown.Visible = fButtons.Contains(SheetButton.lbMoveDown);
        //fToolBar.Enabled = !fButtons.IsEmpty();
    }
}
/// <summary>
/// Exercises the custom EnumSet type: creation, Include/Exclude, Contains,
/// ToString bit rendering, Clone, Clear, equality, the overloaded set
/// operators (* intersection, + union, - difference), ContainsAll and
/// HasIntersect.
/// </summary>
public void Test_Common()
{
    // Empty set; Include(null) must be a no-op.
    EnumSet<RestrictionEnum> es = EnumSet<RestrictionEnum>.Create();
    Assert.IsTrue(es.IsEmpty());
    es.Include(null);
    Assert.IsTrue(es.IsEmpty());
    // Include adds exactly the listed members.
    es.Include(RestrictionEnum.rnPrivacy, RestrictionEnum.rnLocked);
    Assert.IsTrue(es.Contains(RestrictionEnum.rnPrivacy));
    Assert.IsFalse(es.Contains(RestrictionEnum.rnNone));
    Assert.IsFalse(es.IsEmpty());
    // Exclude removes only the named member.
    es.Exclude(RestrictionEnum.rnPrivacy);
    Assert.IsFalse(es.Contains(RestrictionEnum.rnPrivacy));
    Assert.IsTrue(es.Contains(RestrictionEnum.rnLocked));
    es = EnumSet<RestrictionEnum>.Create(RestrictionEnum.rnNone, RestrictionEnum.rnLocked);
    Assert.IsTrue(es.Contains(RestrictionEnum.rnNone));
    Assert.IsTrue(es.Contains(RestrictionEnum.rnLocked));
    // ToString renders a 64-bit binary string; check the low byte.
    string test = es.ToString().Substring(64 - 8);
    Assert.AreEqual("00000011", test);
    // clone test
    EnumSet<RestrictionEnum> copy = (EnumSet<RestrictionEnum>)es.Clone();
    test = copy.ToString().Substring(64 - 8);
    Assert.AreEqual("00000011", test);
    // clear test
    copy.Clear();
    Assert.IsTrue(copy.IsEmpty());
    // Equality is by value; Equals(null) is false.
    EnumSet<RestrictionEnum> es2 = EnumSet<RestrictionEnum>.Create(RestrictionEnum.rnNone, RestrictionEnum.rnLocked);
    Assert.IsTrue(es.Equals(es2));
    Assert.IsFalse(es.Equals(null));
    Assert.IsTrue(es.Contains(RestrictionEnum.rnLocked));
    Assert.IsFalse(es.Contains(RestrictionEnum.rnPrivacy));
    // Operator *: set intersection.
    EnumSet<RestrictionEnum> es3 = EnumSet<RestrictionEnum>.Create(RestrictionEnum.rnLocked);
    EnumSet<RestrictionEnum> es4 = es * es3;
    Assert.IsTrue(es4.Contains(RestrictionEnum.rnLocked));
    // Operator +: set union (and != compares by value).
    es = EnumSet<RestrictionEnum>.Create(RestrictionEnum.rnNone);
    es2 = EnumSet<RestrictionEnum>.Create(RestrictionEnum.rnLocked);
    Assert.IsTrue(es != es2);
    es = es + es2;
    es3 = EnumSet<RestrictionEnum>.Create(RestrictionEnum.rnNone, RestrictionEnum.rnLocked);
    Assert.IsTrue(es.Equals(es3));
    // ContainsAll: vacuously false for an empty argument list here (by design),
    // true only when every listed member is present.
    Assert.IsFalse(es3.ContainsAll(new RestrictionEnum[] {}));
    Assert.IsTrue(es3.ContainsAll(RestrictionEnum.rnNone, RestrictionEnum.rnLocked));
    Assert.IsFalse(es3.ContainsAll(RestrictionEnum.rnNone, RestrictionEnum.rnPrivacy));
    // HasIntersect: true when at least one listed member is present.
    Assert.IsFalse(es3.HasIntersect(new RestrictionEnum[] {}));
    Assert.IsTrue(es3.HasIntersect(RestrictionEnum.rnNone, RestrictionEnum.rnPrivacy));
    Assert.IsFalse(es3.HasIntersect(RestrictionEnum.rnPrivacy));
    // Operator -: set difference (and == compares by value).
    es = es - es2;
    es3 = EnumSet<RestrictionEnum>.Create(RestrictionEnum.rnNone);
    Assert.IsTrue(es == es3);
    Assert.AreEqual("0000000000000000000000000000000000000000000000000000000000000001", es3.ToString());
    Assert.AreNotEqual(0, es3.GetHashCode());
}
/// <summary>
/// The method starts new cluster with defined Configuration; creates a file
/// with specified block_size and writes 10 equal sections in it; it also calls
/// hflush/hsync after each write and throws an IOException in case of an error.
/// </summary>
/// <param name="conf">cluster configuration</param>
/// <param name="fileName">of the file to be created and processed as required</param>
/// <param name="block_size">value to be used for the file's creation</param>
/// <param name="replicas">is the number of replicas</param>
/// <param name="isSync">hsync or hflush</param>
/// <param name="syncFlags">specify the semantic of the sync/flush</param>
/// <exception cref="System.IO.IOException">in case of any errors</exception>
public static void DoTheJob(Configuration conf, string fileName, long block_size, short replicas, bool isSync, EnumSet<HdfsDataOutputStream.SyncFlag> syncFlags)
{
    byte[] fileContent;
    // The file is written in 10 equal sections plus a final rounding remainder.
    int Sections = 10;
    fileContent = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(replicas).Build();
    // Make sure we work with DFS in order to utilize all its functionality
    DistributedFileSystem fileSystem = cluster.GetFileSystem();
    FSDataInputStream @is;
    try
    {
        Path path = new Path(fileName);
        string pathName = new Path(fileSystem.GetWorkingDirectory(), path).ToUri().GetPath();
        FSDataOutputStream stm = fileSystem.Create(path, false, 4096, replicas, block_size);
        System.Console.Out.WriteLine("Created file " + fileName);
        int tenth = AppendTestUtil.FileSize / Sections;
        int rounding = AppendTestUtil.FileSize - tenth * Sections;
        for (int i = 0; i < Sections; i++)
        {
            System.Console.Out.WriteLine("Writing " + (tenth * i) + " to " + (tenth * (i + 1)) + " section to file " + fileName);
            // write to the file
            stm.Write(fileContent, tenth * i, tenth);
            // Wait while hflush/hsync pushes all packets through built pipeline
            if (isSync)
            {
                ((DFSOutputStream)stm.GetWrappedStream()).Hsync(syncFlags);
            }
            else
            {
                ((DFSOutputStream)stm.GetWrappedStream()).Hflush();
            }
            // Check file length if updatelength is required
            if (isSync && syncFlags.Contains(HdfsDataOutputStream.SyncFlag.UpdateLength))
            {
                long currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                NUnit.Framework.Assert.AreEqual("File size doesn't match for hsync/hflush with updating the length", tenth * (i + 1), currentFileLength);
            }
            else
            {
                if (isSync && syncFlags.Contains(HdfsDataOutputStream.SyncFlag.EndBlock))
                {
                    // EndBlock closes a block per section, so block count tracks i.
                    LocatedBlocks blocks = fileSystem.dfs.GetLocatedBlocks(pathName, 0);
                    NUnit.Framework.Assert.AreEqual(i + 1, blocks.GetLocatedBlocks().Count);
                }
            }
            byte[] toRead = new byte[tenth];
            byte[] expected = new byte[tenth];
            System.Array.Copy(fileContent, tenth * i, expected, 0, tenth);
            // Open the same file for read. Need to create new reader after every write operation(!)
            @is = fileSystem.Open(path);
            @is.Seek(tenth * i);
            int readBytes = @is.Read(toRead, 0, tenth);
            System.Console.Out.WriteLine("Has read " + readBytes);
            NUnit.Framework.Assert.IsTrue("Should've get more bytes", (readBytes > 0) && (readBytes <= tenth));
            @is.Close();
            CheckData(toRead, 0, readBytes, expected, "Partial verification");
        }
        // Write the remainder that did not divide evenly into the 10 sections.
        System.Console.Out.WriteLine("Writing " + (tenth * Sections) + " to " + (tenth * Sections + rounding) + " section to file " + fileName);
        stm.Write(fileContent, tenth * Sections, rounding);
        stm.Close();
        NUnit.Framework.Assert.AreEqual("File size doesn't match ", AppendTestUtil.FileSize, fileSystem.GetFileStatus(path).GetLen());
        AppendTestUtil.CheckFullFile(fileSystem, path, fileContent.Length, fileContent, "hflush()");
    }
    finally
    {
        // Always tear the cluster down, even on assertion failure.
        fileSystem.Close();
        cluster.Shutdown();
    }
}
/// <summary>Checks whether the given flag is set in this object's flag set.</summary>
/// <param name="flag">the flag to test</param>
/// <returns>true if the flag is present in fFlags</returns>
public bool HasFlag(PersonFlag flag)
{
    return (fFlags.Contains(flag));
}
/// <summary>
/// Submits the given application to the ResourceManager and polls until the RM
/// reports it has left the initial (NEW / NEW_SAVING / SUBMITTED) states.
/// </summary>
/// <param name="appContext">Submission context; must carry a non-null ApplicationId.</param>
/// <returns>The ApplicationId of the submitted application.</returns>
/// <exception cref="Org.Apache.Hadoop.Yarn.Exceptions.YarnException">
/// if the ApplicationId is missing, the application reaches FAILED/KILLED,
/// or the async-API poll timeout elapses before submission completes.
/// </exception>
/// <exception cref="System.IO.IOException"/>
public override ApplicationId SubmitApplication(ApplicationSubmissionContext appContext)
{
    ApplicationId applicationId = appContext.GetApplicationId();
    if (applicationId == null)
    {
        throw new ApplicationIdNotProvidedException("ApplicationId is not provided in ApplicationSubmissionContext");
    }
    SubmitApplicationRequest request = Org.Apache.Hadoop.Yarn.Util.Records.NewRecord<SubmitApplicationRequest>();
    request.SetApplicationSubmissionContext(appContext);
    // Automatically add the timeline DT into the CLC
    // Only when the security and the timeline service are both enabled
    if (IsSecurityEnabled() && timelineServiceEnabled)
    {
        AddTimelineDelegationToken(appContext.GetAMContainerSpec());
    }
    //TODO: YARN-1763:Handle RM failovers during the submitApplication call.
    rmClient.SubmitApplication(request);
    int pollCount = 0;
    long startTime = Runtime.CurrentTimeMillis();
    // States in which submission is still in flight; anything else is terminal for this loop.
    EnumSet<YarnApplicationState> waitingStates = EnumSet.Of(YarnApplicationState.New, YarnApplicationState.NewSaving, YarnApplicationState.Submitted);
    EnumSet<YarnApplicationState> failToSubmitStates = EnumSet.Of(YarnApplicationState.Failed, YarnApplicationState.Killed);
    while (true)
    {
        try
        {
            ApplicationReport appReport = GetApplicationReport(applicationId);
            YarnApplicationState state = appReport.GetYarnApplicationState();
            if (!waitingStates.Contains(state))
            {
                if (failToSubmitStates.Contains(state))
                {
                    throw new YarnException("Failed to submit " + applicationId + " to YARN : " + appReport.GetDiagnostics());
                }
                Log.Info("Submitted application " + applicationId);
                break;
            }
            long elapsedMillis = Runtime.CurrentTimeMillis() - startTime;
            if (EnforceAsyncAPITimeout() && elapsedMillis >= asyncApiPollTimeoutMillis)
            {
                throw new YarnException("Timed out while waiting for application " + applicationId + " to be submitted successfully");
            }
            // Notify the client through the log every 10 poll, in case the client
            // is blocked here too long.
            if (++pollCount % 10 == 0)
            {
                Log.Info("Application submission is not finished, " + "submitted application " + applicationId + " is still in " + state);
            }
            try
            {
                Sharpen.Thread.Sleep(submitPollIntervalMillis);
            }
            catch (Exception)
            {
                // Interruption during the poll sleep is logged but does not abort submission.
                Log.Error("Interrupted while waiting for application " + applicationId + " to be successfully submitted.");
            }
        }
        catch (ApplicationNotFoundException)
        {
            // FailOver or RM restart happens before RMStateStore saves
            // ApplicationState
            // FIX: added missing space before "with" in the log message.
            Log.Info("Re-submit application " + applicationId + " with the " + "same ApplicationSubmissionContext");
            rmClient.SubmitApplication(request);
        }
    }
    return (applicationId);
}
/// <summary>Extension helper: reports whether this key code belongs to the arrow-key set.</summary>
/// <param name="keyCode">The key code being tested.</param>
/// <returns>True when the code is one of the configured arrow keys.</returns>
public static bool IsArrowKey(this KeyCode keyCode)
{
    bool isArrow = ArrowKeys.Contains(keyCode);
    return isArrow;
}
/// <summary>
/// Builds the extra top-level properties (_id, _rev, _deleted, _attachments,
/// _local_seq, _revisions, _revs_info, _conflicts) for a revision's JSON body,
/// driven by the requested content options.
/// </summary>
/// <param name="rev">Revision whose revId and sequence must already be set.</param>
/// <param name="contentOptions">Options selecting which optional properties to include.</param>
/// <returns>A dictionary containing the selected properties.</returns>
public IDictionary<string, object> ExtraPropertiesForRevision(RevisionInternal rev, EnumSet<Database.TDContentOptions> contentOptions)
{
    string docId = rev.GetDocId();
    string revId = rev.GetRevId();
    long sequenceNumber = rev.GetSequence();
    System.Diagnostics.Debug.Assert((revId != null));
    System.Diagnostics.Debug.Assert((sequenceNumber > 0));
    // Get attachment metadata, and optionally the contents:
    IDictionary<string, object> attachmentsDict = GetAttachmentsDictForSequenceWithContent(sequenceNumber, contentOptions);
    // Get more optional stuff to put in the properties:
    //OPT: This probably ends up making redundant SQL queries if multiple options are enabled.
    // FIX: must be nullable — a non-nullable long cannot be assigned null
    // (the original `long localSeq = null;` does not compile in C#).
    long? localSeq = null;
    if (contentOptions.Contains(Database.TDContentOptions.TDIncludeLocalSeq))
    {
        localSeq = sequenceNumber;
    }
    IDictionary<string, object> revHistory = null;
    if (contentOptions.Contains(Database.TDContentOptions.TDIncludeRevs))
    {
        revHistory = GetRevisionHistoryDict(rev);
    }
    IList<object> revsInfo = null;
    if (contentOptions.Contains(Database.TDContentOptions.TDIncludeRevsInfo))
    {
        revsInfo = new AList<object>();
        IList<RevisionInternal> revHistoryFull = GetRevisionHistory(rev);
        foreach (RevisionInternal historicalRev in revHistoryFull)
        {
            IDictionary<string, object> revHistoryItem = new Dictionary<string, object>();
            string status = "available";
            if (historicalRev.IsDeleted())
            {
                status = "deleted";
            }
            if (historicalRev.IsMissing())
            {
                status = "missing";
            }
            revHistoryItem.Put("rev", historicalRev.GetRevId());
            revHistoryItem.Put("status", status);
            revsInfo.AddItem(revHistoryItem);
        }
    }
    IList<string> conflicts = null;
    if (contentOptions.Contains(Database.TDContentOptions.TDIncludeConflicts))
    {
        RevisionList revs = GetAllRevisionsOfDocumentID(docId, true);
        if (revs.Count > 1)
        {
            conflicts = new AList<string>();
            foreach (RevisionInternal historicalRev in revs)
            {
                if (!historicalRev.Equals(rev))
                {
                    conflicts.AddItem(historicalRev.GetRevId());
                }
            }
        }
    }
    IDictionary<string, object> result = new Dictionary<string, object>();
    result.Put("_id", docId);
    result.Put("_rev", revId);
    if (rev.IsDeleted())
    {
        result.Put("_deleted", true);
    }
    if (attachmentsDict != null)
    {
        result.Put("_attachments", attachmentsDict);
    }
    if (localSeq != null)
    {
        result.Put("_local_seq", localSeq);
    }
    if (revHistory != null)
    {
        result.Put("_revisions", revHistory);
    }
    if (revsInfo != null)
    {
        result.Put("_revs_info", revsInfo);
    }
    if (conflicts != null)
    {
        result.Put("_conflicts", conflicts);
    }
    return result;
}
/// <summary>Checks whether the given qualification is a member of this set.</summary>
/// <param name="qualification">The qualification to test for membership.</param>
/// <returns>True if present; otherwise false.</returns>
public bool Contains(Qualification qualification)
{
    bool found = set.Contains(qualification);
    return found;
}
/// <summary>
/// Validates every @RuleDependency annotation against the recognizer's actual
/// rule names and versions, reporting compile-time errors for unknown rules
/// and for declared versions that exceed the highest version actually required
/// by the rule and its selected relatives (parents/children/ancestors/descendants).
/// </summary>
/// <param name="dependencies">Pairs of rule-dependency annotation and the annotated element.</param>
/// <param name="recognizerType">The recognizer type the dependencies are checked against.</param>
private void CheckDependencies(IList<Tuple<RuleDependency, IElement>> dependencies, ITypeMirror recognizerType)
{
    string[] ruleNames = GetRuleNames(recognizerType);
    int[] ruleVersions = GetRuleVersions(recognizerType, ruleNames);
    RuleDependencyProcessor.RuleRelations relations = ExtractRuleRelations(recognizerType);
    foreach (Tuple<RuleDependency, IElement> dependency in dependencies)
    {
        try
        {
            // Skip dependencies declared for an unrelated recognizer type.
            if (!processingEnv.GetTypeUtils().IsAssignable(GetRecognizerType(dependency.Item1), recognizerType))
            {
                continue;
            }
            // this is the rule in the dependency set with the highest version number
            int effectiveRule = dependency.Item1.Rule();
            if (effectiveRule < 0 || effectiveRule >= ruleVersions.Length)
            {
                // Rule index is out of range: report an error anchored to the
                // annotation's "rule" property when it can be located.
                Tuple<IAnnotationMirror, IAnnotationValue> ruleReferenceElement = FindRuleDependencyProperty(dependency, RuleDependencyProcessor.RuleDependencyProperty.Rule);
                string message = string.Format("Rule dependency on unknown rule {0}@{1} in {2}", dependency.Item1.Rule(), dependency.Item1.Version(), GetRecognizerType(dependency.Item1).ToString());
                if (ruleReferenceElement != null)
                {
                    processingEnv.GetMessager().PrintMessage(Diagnostic.Kind.Error, message, dependency.Item2, ruleReferenceElement.Item1, ruleReferenceElement.Item2);
                }
                else
                {
                    processingEnv.GetMessager().PrintMessage(Diagnostic.Kind.Error, message, dependency.Item2);
                }
                continue;
            }
            // The rule itself (Self) is always part of the dependency closure.
            EnumSet<Dependents> dependents = EnumSet.Of(Dependents.Self, dependency.Item1.Dependents());
            ReportUnimplementedDependents(dependency, dependents);
            // @checked tracks rule indices already visited so the four relation
            // loops below never double-count a rule shared between relations.
            BitSet @checked = new BitSet();
            int highestRequiredDependency = CheckDependencyVersion(dependency, ruleNames, ruleVersions, effectiveRule, null);
            if (dependents.Contains(Dependents.Parents))
            {
                BitSet parents = relations.parents[dependency.Item1.Rule()];
                for (int parent = parents.NextSetBit(0); parent >= 0; parent = parents.NextSetBit(parent + 1))
                {
                    if (parent < 0 || parent >= ruleVersions.Length || @checked.Get(parent))
                    {
                        continue;
                    }
                    @checked.Set(parent);
                    int required = CheckDependencyVersion(dependency, ruleNames, ruleVersions, parent, "parent");
                    highestRequiredDependency = Math.Max(highestRequiredDependency, required);
                }
            }
            if (dependents.Contains(Dependents.Children))
            {
                BitSet children = relations.children[dependency.Item1.Rule()];
                for (int child = children.NextSetBit(0); child >= 0; child = children.NextSetBit(child + 1))
                {
                    if (child < 0 || child >= ruleVersions.Length || @checked.Get(child))
                    {
                        continue;
                    }
                    @checked.Set(child);
                    int required = CheckDependencyVersion(dependency, ruleNames, ruleVersions, child, "child");
                    highestRequiredDependency = Math.Max(highestRequiredDependency, required);
                }
            }
            if (dependents.Contains(Dependents.Ancestors))
            {
                BitSet ancestors = relations.GetAncestors(dependency.Item1.Rule());
                for (int ancestor = ancestors.NextSetBit(0); ancestor >= 0; ancestor = ancestors.NextSetBit(ancestor + 1))
                {
                    if (ancestor < 0 || ancestor >= ruleVersions.Length || @checked.Get(ancestor))
                    {
                        continue;
                    }
                    @checked.Set(ancestor);
                    int required = CheckDependencyVersion(dependency, ruleNames, ruleVersions, ancestor, "ancestor");
                    highestRequiredDependency = Math.Max(highestRequiredDependency, required);
                }
            }
            if (dependents.Contains(Dependents.Descendants))
            {
                BitSet descendants = relations.GetDescendants(dependency.Item1.Rule());
                for (int descendant = descendants.NextSetBit(0); descendant >= 0; descendant = descendants.NextSetBit(descendant + 1))
                {
                    if (descendant < 0 || descendant >= ruleVersions.Length || @checked.Get(descendant))
                    {
                        continue;
                    }
                    @checked.Set(descendant);
                    int required = CheckDependencyVersion(dependency, ruleNames, ruleVersions, descendant, "descendant");
                    highestRequiredDependency = Math.Max(highestRequiredDependency, required);
                }
            }
            // A declared version higher than anything actually required suggests
            // the annotation is stale or wrong — report it on the "version" property.
            int declaredVersion = dependency.Item1.Version();
            if (declaredVersion > highestRequiredDependency)
            {
                Tuple<IAnnotationMirror, IAnnotationValue> versionElement = FindRuleDependencyProperty(dependency, RuleDependencyProcessor.RuleDependencyProperty.Version);
                string message = string.Format("Rule dependency version mismatch: {0} has maximum dependency version {1} (expected {2}) in {3}", ruleNames[dependency.Item1.Rule()], highestRequiredDependency, declaredVersion, GetRecognizerType(dependency.Item1).ToString());
                if (versionElement != null)
                {
                    processingEnv.GetMessager().PrintMessage(Diagnostic.Kind.Error, message, dependency.Item2, versionElement.Item1, versionElement.Item2);
                }
                else
                {
                    processingEnv.GetMessager().PrintMessage(Diagnostic.Kind.Error, message, dependency.Item2);
                }
            }
        }
        catch (AnnotationTypeMismatchException)
        {
            // Annotation values of unexpected type: warn rather than fail the build.
            processingEnv.GetMessager().PrintMessage(Diagnostic.Kind.Warning, string.Format("Could not validate rule dependencies for element {0}", dependency.Item2.ToString()), dependency.Item2);
        }
    }
}
/// <summary>Constructs an "_attachments" dictionary for a revision, to be inserted in its JSON body.</summary>
/// <param name="sequence">Sequence number of the revision; must be positive.</param>
/// <param name="contentOptions">Controls whether attachment data is inlined, stubbed, or marked "follows".</param>
/// <returns>Map of attachment filename to its metadata dictionary, or null if the
/// revision has no attachments or the query fails.</returns>
internal IDictionary<String, Object> GetAttachmentsDictForSequenceWithContent(long sequence, EnumSet<TDContentOptions> contentOptions)
{
    Debug.Assert((sequence > 0));
    Cursor cursor = null;
    var args = new Object[] { sequence };
    try
    {
        cursor = StorageEngine.RawQuery("SELECT filename, key, type, length, revpos FROM attachments WHERE sequence=@", CommandBehavior.SequentialAccess, args);
        if (!cursor.MoveToNext())
        {
            return null;
        }
        var result = new Dictionary<String, Object>();
        while (!cursor.IsAfterLast())
        {
            var dataSuppressed = false;
            var filename = cursor.GetString(0);
            var keyData = cursor.GetBlob(1);
            var contentType = cursor.GetString(2);
            var length = cursor.GetInt(3);
            var revpos = cursor.GetInt(4);
            var key = new BlobKey(keyData);
            var digestString = "sha1-" + Convert.ToBase64String(keyData);
            // FIX: must start as null, not String.Empty. With String.Empty the
            // "stub" marker was never emitted for metadata-only requests and an
            // empty "data" entry was always written; the sibling implementation
            // initializes this to null, and the stub/data checks below rely on it.
            string dataBase64 = null;
            if (contentOptions.Contains(TDContentOptions.TDIncludeAttachments))
            {
                if (contentOptions.Contains(TDContentOptions.TDBigAttachmentsFollow) && length >= Database.BigAttachmentLength)
                {
                    // Large attachment: omit the body and mark it "follows".
                    dataSuppressed = true;
                }
                else
                {
                    // Loading the blob body is very expensive.
                    byte[] data = Attachments.BlobForKey(key);
                    if (data != null)
                    {
                        dataBase64 = Convert.ToBase64String(data);
                    }
                    else
                    {
                        Log.W(Database.Tag, "Error loading attachment");
                    }
                }
            }
            var attachment = new Dictionary<string, object>();
            if (dataBase64 == null || dataSuppressed)
            {
                attachment["stub"] = true;
            }
            if (dataBase64 != null)
            {
                attachment["data"] = dataBase64;
            }
            if (dataSuppressed)
            {
                attachment.Put("follows", true);
            }
            attachment["digest"] = digestString;
            attachment["content_type"] = contentType;
            attachment["length"] = length;
            attachment["revpos"] = revpos;
            result[filename] = attachment;
            cursor.MoveToNext();
        }
        return result;
    }
    catch (SQLException e)
    {
        Log.E(Database.Tag, "Error getting attachments for sequence", e);
        return null;
    }
    finally
    {
        if (cursor != null)
        {
            cursor.Close();
        }
    }
}
/// <summary>Inserts the _id, _rev and _attachments properties into the JSON data and stores it in rev.
/// </summary>
/// <remarks>
/// Inserts the _id, _rev and _attachments properties into the JSON data and stores it in rev.
/// Rev must already have its revID and sequence properties set.
/// Optional properties (_local_seq, _revisions, _revs_info, _conflicts) are added
/// only when requested via <paramref name="contentOptions"/>.
/// </remarks>
/// <param name="rev">The revision to build properties for; revId and sequence must be set.</param>
/// <param name="contentOptions">Options selecting which optional properties to include.</param>
/// <returns>A dictionary holding the selected special properties.</returns>
internal IDictionary<String, Object> ExtraPropertiesForRevision(RevisionInternal rev, EnumSet<TDContentOptions> contentOptions)
{
    var docId = rev.GetDocId();
    var revId = rev.GetRevId();
    var sequenceNumber = rev.GetSequence();
    Debug.Assert((revId != null));
    Debug.Assert((sequenceNumber > 0));
    // Get attachment metadata, and optionally the contents:
    var attachmentsDict = GetAttachmentsDictForSequenceWithContent(sequenceNumber, contentOptions);
    // Get more optional stuff to put in the properties:
    //OPT: This probably ends up making redundant SQL queries if multiple options are enabled.
    // -1 acts as the "not requested" sentinel for the local sequence.
    var localSeq = -1L;
    if (contentOptions.Contains(TDContentOptions.TDIncludeLocalSeq))
    {
        localSeq = sequenceNumber;
    }
    IDictionary<string, object> revHistory = null;
    if (contentOptions.Contains(TDContentOptions.TDIncludeRevs))
    {
        revHistory = GetRevisionHistoryDict(rev);
    }
    IList<object> revsInfo = null;
    if (contentOptions.Contains(TDContentOptions.TDIncludeRevsInfo))
    {
        revsInfo = new AList<object>();
        var revHistoryFull = GetRevisionHistory(rev);
        foreach (RevisionInternal historicalRev in revHistoryFull)
        {
            var revHistoryItem = new Dictionary<string, object>();
            var status = "available";
            if (historicalRev.IsDeleted())
            {
                status = "deleted";
            }
            if (historicalRev.IsMissing())
            {
                status = "missing";
            }
            revHistoryItem.Put("rev", historicalRev.GetRevId());
            revHistoryItem["status"] = status;
            revsInfo.AddItem(revHistoryItem);
        }
    }
    IList<string> conflicts = null;
    if (contentOptions.Contains(TDContentOptions.TDIncludeConflicts))
    {
        // More than one current revision means the document is in conflict;
        // list the revIds of every revision other than the one being rendered.
        var revs = GetAllRevisionsOfDocumentID(docId, true);
        if (revs.Count > 1)
        {
            conflicts = new AList<string>();
            foreach (RevisionInternal historicalRev in revs)
            {
                if (!historicalRev.Equals(rev))
                {
                    conflicts.AddItem(historicalRev.GetRevId());
                }
            }
        }
    }
    var result = new Dictionary<string, object>();
    result["_id"] = docId;
    result["_rev"] = revId;
    if (rev.IsDeleted())
    {
        result["_deleted"] = true;
    }
    if (attachmentsDict != null)
    {
        result["_attachments"] = attachmentsDict;
    }
    if (localSeq > -1)
    {
        result["_local_seq"] = localSeq;
    }
    if (revHistory != null)
    {
        result["_revisions"] = revHistory;
    }
    if (revsInfo != null)
    {
        result["_revs_info"] = revsInfo;
    }
    if (conflicts != null)
    {
        result["_conflicts"] = conflicts;
    }
    return result;
}
/// <summary>
/// Runs a 2-map/2-reduce SleepJob on the mini cluster with task profiling
/// enabled for one task id, then scans the NodeManager log directories and
/// asserts that the expected profiler output (hprof in profile.out, or Xprof
/// on stdout, depending on <paramref name="useDefault"/>) was produced only
/// for the profiled task.
/// </summary>
/// <param name="useDefault">True to use the default profiler params for all tasks;
/// false to set hprof for maps and -Xprof for reduces explicitly.</param>
/// <exception cref="System.Exception"/>
private void TestProfilerInternal(bool useDefault)
{
    // Skip silently when the MRAppMaster jar is not available in this build.
    if (!(new FilePath(MiniMRYarnCluster.Appjar)).Exists())
    {
        Log.Info("MRAppJar " + MiniMRYarnCluster.Appjar + " not found. Not running test.");
        return;
    }
    SleepJob sleepJob = new SleepJob();
    JobConf sleepConf = new JobConf(mrCluster.GetConfig());
    sleepConf.SetProfileEnabled(true);
    // Profile the same task id on both the map side (true) and reduce side (false).
    sleepConf.SetProfileTaskRange(true, ProfiledTaskId.ToString());
    sleepConf.SetProfileTaskRange(false, ProfiledTaskId.ToString());
    if (!useDefault)
    {
        // use hprof for map to profile.out
        sleepConf.Set(MRJobConfig.TaskMapProfileParams, "-agentlib:hprof=cpu=times,heap=sites,force=n,thread=y,verbose=n," + "file=%s");
        // use Xprof for reduce to stdout
        sleepConf.Set(MRJobConfig.TaskReduceProfileParams, "-Xprof");
    }
    sleepJob.SetConf(sleepConf);
    // 2-map-2-reduce SleepJob
    Job job = sleepJob.CreateJob(2, 2, 500, 1, 500, 1);
    job.SetJarByClass(typeof(SleepJob));
    job.AddFileToClassPath(AppJar);
    // The AppMaster jar itself.
    job.WaitForCompletion(true);
    JobId jobId = TypeConverter.ToYarn(job.GetJobID());
    ApplicationId appID = jobId.GetAppId();
    // Poll once per second (up to 60s) for the RM app to reach a terminal state.
    int pollElapsed = 0;
    while (true)
    {
        Sharpen.Thread.Sleep(1000);
        pollElapsed += 1000;
        if (TerminalRmAppStates.Contains(mrCluster.GetResourceManager().GetRMContext().GetRMApps()[appID].GetState()))
        {
            break;
        }
        if (pollElapsed >= 60000)
        {
            Log.Warn("application did not reach terminal state within 60 seconds");
            break;
        }
    }
    NUnit.Framework.Assert.AreEqual(RMAppState.Finished, mrCluster.GetResourceManager().GetRMContext().GetRMApps()[appID].GetState());
    // Job finished, verify logs
    //
    Configuration nmConf = mrCluster.GetNodeManager(0).GetConfig();
    string appIdStr = appID.ToString();
    string appIdSuffix = Sharpen.Runtime.Substring(appIdStr, "application_".Length, appIdStr.Length);
    string containerGlob = "container_" + appIdSuffix + "_*_*";
    IDictionary<TaskAttemptID, Path> taLogDirs = new Dictionary<TaskAttemptID, Path>();
    // Matches the "Task done" syslog line and captures the attempt id.
    Sharpen.Pattern taskPattern = Sharpen.Pattern.Compile(".*Task:(attempt_" + appIdSuffix + "_[rm]_" + "[0-9]+_[0-9]+).*");
    foreach (string logDir in nmConf.GetTrimmedStrings(YarnConfiguration.NmLogDirs))
    {
        // filter out MRAppMaster and create attemptId->logDir map
        //
        foreach (FileStatus fileStatus in localFs.GlobStatus(new Path(logDir + Path.Separator + appIdStr + Path.Separator + containerGlob + Path.Separator + TaskLog.LogName.Syslog)))
        {
            BufferedReader br = new BufferedReader(new InputStreamReader(localFs.Open(fileStatus.GetPath())));
            string line;
            while ((line = br.ReadLine()) != null)
            {
                Matcher m = taskPattern.Matcher(line);
                if (m.Matches())
                {
                    // found Task done message
                    taLogDirs[TaskAttemptID.ForName(m.Group(1))] = fileStatus.GetPath().GetParent();
                    break;
                }
            }
            br.Close();
        }
    }
    NUnit.Framework.Assert.AreEqual(4, taLogDirs.Count);
    // all 4 attempts found
    foreach (KeyValuePair<TaskAttemptID, Path> dirEntry in taLogDirs)
    {
        TaskAttemptID tid = dirEntry.Key;
        Path profilePath = new Path(dirEntry.Value, TaskLog.LogName.Profile.ToString());
        Path stdoutPath = new Path(dirEntry.Value, TaskLog.LogName.Stdout.ToString());
        if (useDefault || tid.GetTaskType() == TaskType.Map)
        {
            if (tid.GetTaskID().GetId() == ProfiledTaskId)
            {
                // verify profile.out
                BufferedReader br = new BufferedReader(new InputStreamReader(localFs.Open(profilePath)));
                string line = br.ReadLine();
                NUnit.Framework.Assert.IsTrue("No hprof content found!", line != null && line.StartsWith("JAVA PROFILE"));
                br.Close();
                NUnit.Framework.Assert.AreEqual(0L, localFs.GetFileStatus(stdoutPath).GetLen());
            }
            else
            {
                // Non-profiled tasks must not produce a profile file.
                NUnit.Framework.Assert.IsFalse("hprof file should not exist", localFs.Exists(profilePath));
            }
        }
        else
        {
            NUnit.Framework.Assert.IsFalse("hprof file should not exist", localFs.Exists(profilePath));
            if (tid.GetTaskID().GetId() == ProfiledTaskId)
            {
                // reducer is profiled with Xprof
                BufferedReader br = new BufferedReader(new InputStreamReader(localFs.Open(stdoutPath)));
                bool flatProfFound = false;
                string line;
                while ((line = br.ReadLine()) != null)
                {
                    if (line.StartsWith("Flat profile"))
                    {
                        flatProfFound = true;
                        break;
                    }
                }
                br.Close();
                NUnit.Framework.Assert.IsTrue("Xprof flat profile not found!", flatProfFound);
            }
            else
            {
                NUnit.Framework.Assert.AreEqual(0L, localFs.GetFileStatus(stdoutPath).GetLen());
            }
        }
    }
}
/// <summary>
/// Loads a revision of a document from the revs/docs tables: the named revision
/// when <paramref name="rev"/> is non-null, otherwise the current (winning,
/// non-deleted) revision with the highest revid.
/// </summary>
/// <param name="id">The document id to look up.</param>
/// <param name="rev">A specific revision id, or null for the current revision.</param>
/// <param name="contentOptions">Content options; TDNoBody skips loading the JSON body.</param>
/// <returns>The revision with its sequence (and optionally body) populated,
/// or null when not found or on a SQL error (the error is logged, not thrown).</returns>
public RevisionInternal GetDocumentWithIDAndRev(string id, string rev, EnumSet<Database.TDContentOptions> contentOptions)
{
    RevisionInternal result = null;
    string sql;
    Cursor cursor = null;
    try
    {
        cursor = null;
        // Column order fixes the indices used below: 0=revid, 1=deleted, 2=sequence, 3=json.
        string cols = "revid, deleted, sequence";
        if (!contentOptions.Contains(Database.TDContentOptions.TDNoBody))
        {
            cols += ", json";
        }
        if (rev != null)
        {
            sql = "SELECT " + cols + " FROM revs, docs WHERE docs.docid=? AND revs.doc_id=docs.doc_id AND revid=? LIMIT 1";
            string[] args = new string[] { id, rev };
            cursor = database.RawQuery(sql, args);
        }
        else
        {
            sql = "SELECT " + cols + " FROM revs, docs WHERE docs.docid=? AND revs.doc_id=docs.doc_id and current=1 and deleted=0 ORDER BY revid DESC LIMIT 1";
            string[] args = new string[] { id };
            cursor = database.RawQuery(sql, args);
        }
        if (cursor.MoveToNext())
        {
            if (rev == null)
            {
                rev = cursor.GetString(0);
            }
            bool deleted = (cursor.GetInt(1) > 0);
            result = new RevisionInternal(id, rev, deleted, this);
            result.SetSequence(cursor.GetLong(2));
            // Skip body expansion only when TDNoBody is the sole requested option.
            if (!contentOptions.Equals(EnumSet.Of(Database.TDContentOptions.TDNoBody)))
            {
                byte[] json = null;
                if (!contentOptions.Contains(Database.TDContentOptions.TDNoBody))
                {
                    json = cursor.GetBlob(3);
                }
                ExpandStoredJSONIntoRevisionWithAttachments(json, result, contentOptions);
            }
        }
    }
    catch (SQLException e)
    {
        // Best effort: log and fall through to return null.
        Log.E(Database.Tag, "Error getting document with id and rev", e);
    }
    finally
    {
        if (cursor != null)
        {
            cursor.Close();
        }
    }
    return result;
}
/// <summary>
/// Constructs the "_attachments" dictionary for the revision at the given
/// sequence, mapping each attachment filename to its metadata (digest,
/// content_type, length, revpos) plus, depending on the content options,
/// inline base64 data, a "stub" marker, or a "follows" marker for large
/// attachments.
/// </summary>
/// <param name="sequence">Sequence number of the revision; must be positive.</param>
/// <param name="contentOptions">Controls inlining vs. stubbing of attachment bodies.</param>
/// <returns>Filename-to-metadata map, or null when there are no attachments
/// or a SQL error occurs (the error is logged, not thrown).</returns>
public IDictionary<string, object> GetAttachmentsDictForSequenceWithContent(long sequence, EnumSet<Database.TDContentOptions> contentOptions)
{
    System.Diagnostics.Debug.Assert((sequence > 0));
    Cursor cursor = null;
    string[] args = new string[] { System.Convert.ToString(sequence) };
    try
    {
        cursor = database.RawQuery("SELECT filename, key, type, length, revpos FROM attachments WHERE sequence=?", args);
        if (!cursor.MoveToNext())
        {
            return null;
        }
        IDictionary<string, object> result = new Dictionary<string, object>();
        while (!cursor.IsAfterLast())
        {
            bool dataSuppressed = false;
            int length = cursor.GetInt(3);
            byte[] keyData = cursor.GetBlob(1);
            BlobKey key = new BlobKey(keyData);
            string digestString = "sha1-" + Base64.EncodeBytes(keyData);
            // Stays null unless the attachment body is loaded below; the null is
            // what triggers the "stub" marker for metadata-only requests.
            string dataBase64 = null;
            if (contentOptions.Contains(Database.TDContentOptions.TDIncludeAttachments))
            {
                if (contentOptions.Contains(Database.TDContentOptions.TDBigAttachmentsFollow) && length >= Database.kBigAttachmentLength)
                {
                    // Large attachment: omit the body and mark it "follows".
                    dataSuppressed = true;
                }
                else
                {
                    // Loading the blob body is very expensive.
                    byte[] data = attachments.BlobForKey(key);
                    if (data != null)
                    {
                        dataBase64 = Base64.EncodeBytes(data);
                    }
                    else
                    {
                        Log.W(Database.Tag, "Error loading attachment");
                    }
                }
            }
            IDictionary<string, object> attachment = new Dictionary<string, object>();
            if (dataBase64 == null || dataSuppressed == true)
            {
                attachment.Put("stub", true);
            }
            if (dataBase64 != null)
            {
                attachment.Put("data", dataBase64);
            }
            if (dataSuppressed == true)
            {
                attachment.Put("follows", true);
            }
            attachment.Put("digest", digestString);
            string contentType = cursor.GetString(2);
            attachment.Put("content_type", contentType);
            attachment.Put("length", length);
            attachment.Put("revpos", cursor.GetInt(4));
            string filename = cursor.GetString(0);
            result.Put(filename, attachment);
            cursor.MoveToNext();
        }
        return result;
    }
    catch (SQLException e)
    {
        // Best effort: log and fall through to return null.
        Log.E(Database.Tag, "Error getting attachments for sequence", e);
        return null;
    }
    finally
    {
        if (cursor != null)
        {
            cursor.Close();
        }
    }
}
/// <summary>Extension helper: reports whether this key code is one of the W/A/S/D movement keys.</summary>
/// <param name="keyCode">The key code being tested.</param>
/// <returns>True when the code is in the WASD key set.</returns>
public static bool IsWASD(this KeyCode keyCode)
{
    bool isWasd = WASDKeys.Contains(keyCode);
    return isWasd;
}
/// <summary>Test helper: verifies the set reports membership for the element it was created with.</summary>
/// <typeparam name="T">The enum type backing the set.</typeparam>
/// <param name="set">The EnumSet under test.</param>
/// <param name="element">The element expected to be present.</param>
/// <returns>True if the set contains the element; otherwise false.</returns>
public bool WhenCreated_ThenOnlyContainsElementsCreatedWith<T>(EnumSet<T> set, T element) where T : struct, Enum
{
    bool present = set.Contains(element);
    return present;
}