/// <summary>
/// Rewinds the progress counter of the given segment by <paramref name="count"/> positions.
/// </summary>
/// <param name="segment_ID">Index of the segment in the segment table.</param>
/// <param name="count">Number of positions to roll back (default 1).</param>
public void SegmentRollback(int segment_ID, int count = 1)
{
    SegmentEntry entry = segment_list[segment_ID];
    entry.current -= count;
    // SegmentEntry is a value type, so the modified copy must be stored back.
    segment_list[segment_ID] = entry;
}
/// <summary>
/// Marks a segment as done (fully consumed). A segment must be full before it
/// can be marked done. When the retired segment starts exactly at the tail of
/// the ring, the tail pointer is advanced past it; the tail segment id is then
/// moved forward to the first segment that is not yet done.
/// </summary>
/// <param name="segment_ID">Index of the segment in the segment table.</param>
/// <exception cref="System.Exception">Thrown when the segment is not full.</exception>
public void SegmentDone(int segment_ID)
{
    Logging.log.Trace($"Marking Segment_ID:{segment_ID} done");
    SegmentEntry cur_segment = segment_list[segment_ID];
    // A segment must be completely written (full) before it can be retired.
    if (!cur_segment.full)
    {
        throw new System.Exception($@"Segment ID:{segment_ID} Cannot mark as done, when it is not full first");
    }
    cur_segment.done = true;
    segment_list[segment_ID] = cur_segment;
    // Check the boundary to see if we should update the tail pointer:
    // if this segment starts exactly at the tail, its space can be reclaimed.
    if (cur_segment.start == (tail_pointer % memory_size))
    {
        Logging.log.Trace($"Tailpointer was: {tail_pointer} is {cur_segment.stop}");
        tail_pointer = (cur_segment.stop + 1) % memory_size;
    }
    // See if we should progress the tail segment id;
    // scan forward from the current tail for the first segment not yet done.
    for (int i = 1; i < num_segments; i++)
    {
        int scope_pointer = (i + current_tail_segment_id) % num_segments;
        cur_segment = segment_list[scope_pointer];
        if (!cur_segment.done)
        {
            current_tail_segment_id = (current_tail_segment_id + i) % num_segments;
            return;
        }
    }
    // Every other segment is done.
    // NOTE(review): stepping the tail by exactly 1 in this case looks intentional,
    // but confirm against the allocator's expectations.
    current_tail_segment_id = (current_tail_segment_id + 1) % num_segments;
}
/// <summary>
/// Reports whether the segment at next_segment_id is free to be claimed,
/// i.e. it is not currently active.
/// </summary>
public bool NextSegmentReady()
{
    return !segment_list[next_segment_id].active;
}
/// <summary>
/// Returns the next absolute ring-memory address to read from segment
/// <paramref name="segment_ID"/>, advancing the segment's read counter.
/// Returns -1 when the segment is not readable (already done, or not yet full).
/// When the counter reaches the segment's stop address the segment is marked
/// done and the counter is reset to 0.
/// </summary>
/// <param name="segment_ID">Index of the segment in the segment table.</param>
/// <returns>Absolute address to read from, or -1 when no data is available.</returns>
public int LoadData(int segment_ID)
{
    // Get the current segment
    SegmentEntry cur_segment = segment_list[segment_ID];
    if (cur_segment.done || !cur_segment.full)
    {
        // Not worth throwing for (a throw used to live here, now disabled);
        // callers treat -1 as "no data available".
        return(-1);
    }
    // Calculate the offset (absolute address inside the ring memory)
    int ret = (cur_segment.start + cur_segment.current) % memory_size;
    cur_segment.current++;
    // If the next byte is the last, we mark the segment as done, and reset counters
    if ((cur_segment.start + cur_segment.current) % memory_size == cur_segment.stop)
    {
        Logging.log.Info($"Segment:{segment_ID} is fully loaded based on counter, Marking it done");
        SegmentDone(segment_ID);
        // SegmentDone rewrote the table entry, so re-read it before resetting.
        cur_segment = segment_list[segment_ID];
        cur_segment.current = 0;
    }
    // Put the data back
    segment_list[segment_ID] = cur_segment;
    return(ret);
}
/// <summary>
/// Returns the next absolute ring-memory address to write into segment
/// <paramref name="segment_ID"/>, advancing the segment's write counter.
/// When the counter reaches the segment's stop address the segment is marked
/// full and the counter is reset to 0.
/// </summary>
/// <param name="segment_ID">Index of the segment in the segment table.</param>
/// <returns>Absolute address to write to.</returns>
/// <exception cref="System.Exception">Thrown when the segment is already full.</exception>
public int SaveData(int segment_ID)
{
    // Get the current segment
    SegmentEntry cur_segment = segment_list[segment_ID];
    if (cur_segment.full)
    {
        throw new System.Exception($"Segment ID:{segment_ID} is marked as full, so data cannot be saved");
    }
    // Calculate the offset
    int ret = (cur_segment.start + cur_segment.current) % memory_size;
    // Increment the current counter
    cur_segment.current++;
    // If the next byte is the last, we mark the segment as full, and reset counters
    if ((cur_segment.start + cur_segment.current) % memory_size == cur_segment.stop)
    {
        Logging.log.Info($"Segment:{segment_ID} is fully saved based on counter, Marking it full");
        SegmentFull(segment_ID);
        // SegmentFull rewrote the table entry, so re-read it before resetting.
        cur_segment = segment_list[segment_ID];
        cur_segment.current = 0;
    }
    // Put the data back
    segment_list[segment_ID] = cur_segment;
    return(ret);
}
/// <summary>
/// Flags the given segment as completely written (full).
/// </summary>
/// <param name="segment_ID">Index of the segment in the segment table.</param>
public void SegmentFull(int segment_ID)
{
    Logging.log.Trace($"Marking Segment_ID:{segment_ID} full");
    SegmentEntry entry = segment_list[segment_ID];
    entry.full = true;
    // Value-type copy must be stored back into the table.
    segment_list[segment_ID] = entry;
}
/// <summary>
/// Moves the progress counter of a segment back by <paramref name="count"/>
/// positions. Always reports success.
/// </summary>
private bool Rollback(int segment_id, int count)
{
    SegmentEntry entry = segment_list[segment_id];
    entry.current -= count;
    segment_list[segment_id] = entry;
    return true;
}
/// <summary>
/// Attaches <paramref name="metadata"/> to the segment currently being loaded.
/// Always reports success.
/// </summary>
public bool MetadataCurrentLoadSegment(MetaData metadata)
{
    SegmentEntry entry = segment_list[load_segment_id];
    entry.metaData = metadata;
    segment_list[load_segment_id] = entry;
    return true;
}
/// <summary>
/// Stores <paramref name="meta_data"/> on the given segment.
/// </summary>
/// <param name="segment_ID">Index of the segment in the segment table.</param>
/// <param name="meta_data">Metadata to associate with the segment.</param>
public void SaveMetaData(int segment_ID, MetaData meta_data)
{
    SegmentEntry entry = segment_list[segment_ID];
    entry.metaData = meta_data;
    // SegmentEntry is a struct; assigning through the array writes the copy back.
    segment_list[segment_ID] = entry;
}
/// <summary>
/// Translates a segment-relative write offset into an absolute ring-memory
/// address for the given segment.
/// </summary>
/// <exception cref="System.Exception">Thrown when the segment is already full.</exception>
public int SaveData(int segment_ID, int offset)
{
    SegmentEntry entry = segment_list[segment_ID];
    if (entry.full)
    {
        throw new System.Exception($"Segment ID:{segment_ID} is marked as full, so data cannot be saved");
    }
    return (entry.start + offset) % memory_size;
}
/// <summary>
/// Translates a segment-relative read offset into an absolute ring-memory
/// address. Returns -1 when the segment cannot be read (done, or not yet full).
/// NOTE(review): the default offset of -1 would yield (start - 1) % memory_size,
/// which can be negative in C# — confirm callers always pass an explicit offset.
/// </summary>
public int LoadData(int segment_ID, int offset = -1)
{
    SegmentEntry entry = segment_list[segment_ID];
    if (entry.done || !entry.full)
    {
        // Signal "no data" instead of throwing; callers check for -1.
        return -1;
    }
    return (entry.start + offset) % memory_size;
}
/// <summary>
/// Returns the next absolute address to read from the current load segment,
/// advancing its read counter on success. Returns -1 when no data is ready.
/// </summary>
public int LoadData()
{
    SegmentEntry entry = segment_list[load_segment_id];
    int x = entry.current;
    int ret = LoadData(x);
    if (ret != -1)
    {
        // LoadData(x) may have modified the table entry, so re-read it
        // before incrementing the counter and storing it back.
        entry = segment_list[load_segment_id];
        entry.current++;
        segment_list[load_segment_id] = entry;
    }
    Logging.log.Trace($"LoadData() x: {x} ret: {ret} load_segment: {load_segment_id}");
    return ret;
}
/// <summary>
/// Closes the current save segment for writing: filling stops, reading begins
/// and the counter is reset, then save_segment_id advances to the next segment.
/// </summary>
public void FinishFillingCurrentSaveSegment()
{
    Logging.log.Trace($"FinishFillingCurrentSaveSegment : save_segment_id:{save_segment_id}");
    SegmentEntry entry = segment_list[save_segment_id];
    entry.filling = false;
    entry.reading = true;
    entry.current = 0;
    segment_list[save_segment_id] = entry;
    // Move on to the next segment in the ring; the next segment is NOT marked
    // as filling here — that happens lazily when data is first saved to it.
    save_segment_id = (save_segment_id + 1) % num_segments;
}
/// <summary>
/// Closes the current load segment: it has been both filled and read, so it is
/// deactivated and its counter reset, then load_segment_id advances.
/// </summary>
public void FinishReadingCurrentLoadSegment()
{
    Logging.log.Trace($"FinishReadingCurrentLoadSegment : load_segment_id:{load_segment_id}");
    SegmentEntry entry = segment_list[load_segment_id];
    entry.reading = false;
    entry.active = false;
    entry.current = 0;
    segment_list[load_segment_id] = entry;
    // Move on to the next segment in the ring; the next segment is NOT marked
    // as reading here — that is handled elsewhere.
    load_segment_id = (load_segment_id + 1) % num_segments;
}
////////// interface

/// <summary>
/// Creates a FIFO of <paramref name="num_segments"/> ring segments sharing a
/// memory area of <paramref name="memory_size"/> bytes. All entries start out
/// done and full with an empty 0-0 range, which marks them as free slots.
/// </summary>
public MultiMemorySegmentsRingBufferFIFO(int num_segments, int memory_size)
{
    this.num_segments = num_segments;
    this.memory_size = memory_size;
    // Allocate the ring entry segments and initialise every slot as free.
    this.segment_list = new SegmentEntry[num_segments];
    for (int i = 0; i < num_segments; i++)
    {
        segment_list[i] = new SegmentEntry { done = true, full = true, start = 0, stop = 0, current = 0 };
    }
}
////////// interface

/// <summary>
/// Creates a ring of <paramref name="num_segments"/> segments over a single
/// memory area of <paramref name="memory_size"/> bytes. New segments span
/// 0 to 0 and are neither filling, active nor reading, so they are detectably
/// free from the start.
/// </summary>
public SingleMemorySegmentsRingBufferFIFO(int num_segments, int memory_size)
{
    this.num_segments = num_segments;
    this.memory_size = memory_size;
    // Allocate the ring entry segments and initialise every slot as free.
    this.segment_list = new SegmentEntry[num_segments];
    for (int i = 0; i < num_segments; i++)
    {
        segment_list[i] = new SegmentEntry { filling = false, active = false, reading = false, start = 0, stop = 0 };
    }
}
/// <summary>
/// Translates a write index into an absolute address within the current save
/// segment, opening the segment for filling on first use. The segment's stop
/// address grows as higher indices are written.
/// </summary>
/// <param name="index">Segment-relative write index.</param>
/// <returns>Absolute ring-memory address for the write.</returns>
/// <exception cref="System.Exception">Thrown when the segment is not in filling mode.</exception>
public int SaveData(int index)
{
    SegmentEntry current = segment_list[save_segment_id];
    Logging.log.Trace($"Savedata(index): " +
                      $"active: {current.active} " +
                      $"fill: {current.filling} " +
                      $"read: {current.reading} " +
                      $"index: {index}");
    // If the current block is active, but filling mode has not been enabled yet,
    // anchor it to the end of the previous segment and switch on filling mode.
    if (current.active && !current.filling && !current.reading)
    {
        Logging.log.Trace($"New non active Save block! ");
        // find the last segment id, and set the start byte to the stop byte of the last
        int last_save_segment_id = save_segment_id - 1 < 0 ? this.num_segments - 1 : save_segment_id - 1;
        SegmentEntry last = segment_list[last_save_segment_id];
        current.start = last.stop;
        current.stop = last.stop;
        current.filling = true;
        segment_list[save_segment_id] = current;
    }
    // Re-read the entry in case it was just rewritten above.
    current = segment_list[save_segment_id];
    // The segment is not in filling mode! something went wrong
    if (!current.filling)
    {
        throw new System.Exception("The segment is not in filling mode! we cannot save to it");
    }
    int addr = (current.start + index) % memory_size;
    // Test if we should increase max size of element: writing at or past the
    // current stop extends the segment to one byte past this address.
    if (MemoryRange(current.start, current.stop) <= MemoryRange(current.start, addr))
    {
        current.stop = (addr + 1) % memory_size;
        segment_list[save_segment_id] = current;
    }
    return(addr);
}
/// <summary>
/// Allocates a new segment of <paramref name="size"/> bytes at the head of the
/// ring and returns its segment id.
/// </summary>
/// <param name="size">Number of bytes to reserve for the segment.</param>
/// <returns>The id of the newly allocated segment.</returns>
/// <exception cref="System.Exception">
/// Thrown when the segment table is full, or the free memory range cannot hold
/// the requested size.
/// </exception>
public int AllocateSegment(int size)
{
    // Get the head element, so we can modify it
    SegmentEntry new_segment = segment_list[next_head_segment_id];
    // Check if it is good, if not, all segments must be filled up
    if (!new_segment.done && !new_segment.full)
    {
        Logging.log.Fatal("The segment entry table is full!");
        throw new System.Exception("The segment entry table is full!");
    }
    // The byte just before the tail is the last free byte. Add memory_size
    // before taking the modulus: C#'s '%' keeps the sign of the dividend, so
    // (tail_pointer - 1) % memory_size would be -1 when tail_pointer == 0.
    int last_free = (tail_pointer - 1 + memory_size) % memory_size;
    // If the range is currently bigger than what we can handle, there is nothing to do.
    // If the tail segment and the head segment are the same, we must be hitting
    // ourselves (completely empty buffer), which is allowed.
    if (MemoryRange(head_pointer, last_free) < size && current_tail_segment_id != next_head_segment_id)
    {
        Logging.log.Error($"The range : {head_pointer},{tail_pointer} is not large enough for {size}");
        Logging.log.Fatal($"head_pointer: {head_pointer} tail_pointer {tail_pointer} mem: {MemoryRange(head_pointer, last_free)}");
        throw new System.Exception("The range is not big enough for the allocation");
    }
    new_segment.done = false;
    new_segment.full = false;
    new_segment.start = head_pointer;
    // Offset with last byte, so we do not have to subtract 1
    new_segment.stop = (new_segment.start + size) % memory_size;
    new_segment.current = 0;
    // Set the new head pointer
    Logging.log.Trace($"Headpointer was: {head_pointer} is { (new_segment.stop + 1) % memory_size}");
    head_pointer = (new_segment.stop + 1) % memory_size;
    // save the segment
    segment_list[next_head_segment_id] = new_segment;
    // set the head segment to a new segment
    Logging.log.Trace($"Allocating segment of size {size}. Segment id {next_head_segment_id}");
    int ret = next_head_segment_id;
    next_head_segment_id = (next_head_segment_id + 1) % num_segments;
    return ret;
}
/// <summary>
/// Translates a read index into an absolute address within the current load
/// segment. Returns -1 when the segment is not in reading mode.
/// </summary>
/// <exception cref="System.Exception">
/// Thrown when the index falls outside the segment's recorded range.
/// </exception>
public int LoadData(int index)
{
    SegmentEntry entry = segment_list[load_segment_id];
    Logging.log.Trace($"LoadData(index): active: {entry.active} " +
                      $"fill: {entry.filling} " +
                      $"read: {entry.reading} " +
                      $"index: {index}");
    if (!entry.reading)
    {
        // Not readable yet; callers treat -1 as "no data".
        return -1;
    }
    int addr = (entry.start + index) % memory_size;
    // The memory is out of range
    if (MemoryRange(entry.start, entry.stop) <= MemoryRange(entry.start, addr))
    {
        throw new System.Exception("Requesting for memory out of index for that block!");
    }
    return addr;
}
/// <summary>
/// Claims the next free segment: resets its range and mode flags, stamps it
/// with <paramref name="metadata"/>, marks it active and advances
/// next_segment_id. Returns false when the next segment is still in use.
/// </summary>
public bool NextSegment(MetaData metadata)
{
    Logging.log.Trace($"NextSegment: next_segment_id:{next_segment_id}");
    SegmentEntry slot = segment_list[next_segment_id];
    if (slot.active)
    {
        // Still being filled or read from; report failure instead of throwing.
        return false;
    }
    // Fresh, empty segment: zero range, no mode flags yet, new metadata.
    slot.start = 0;
    slot.stop = 0;
    slot.filling = false;
    slot.reading = false;
    slot.metaData = metadata;
    slot.active = true;
    segment_list[next_segment_id] = slot;
    next_segment_id = (next_segment_id + 1) % num_segments;
    return true;
}
// This function moves the segment to the top of the fifo queue
/// <summary>
/// Re-queues segment <paramref name="segment_ID"/> at the head of the FIFO:
/// its entry is copied into the next head slot, and the old slot is marked
/// full and done (freeing its space) and has its range zeroed.
/// </summary>
/// <param name="segment_ID">Id of the segment to delay.</param>
/// <returns>The new segment id at the head of the queue.</returns>
/// <exception cref="System.Exception">Thrown when the segment entry table is full.</exception>
public int DelaySegment(int segment_ID)
{
    // Scope in the new segment, and test if it is ready
    SegmentEntry new_segment = segment_list[next_head_segment_id];
    int retSegment = next_head_segment_id;
    if (!new_segment.done && !new_segment.full)
    {
        // Fixed: an unreachable 'return(-1);' used to follow this throw (dead code, CS0162).
        throw new System.Exception("The segment entry table is full!");
    }
    // Point the new element to the old
    segment_list[next_head_segment_id] = segment_list[segment_ID];
    // Mark the old as full and done, to free its space
    if (!IsSegmentFull(segment_ID))
    {
        SegmentFull(segment_ID);
    }
    if (!IsSegmentDone(segment_ID))
    {
        SegmentDone(segment_ID);
    }
    // reset the start and stop pointers
    SegmentEntry deleted = segment_list[segment_ID];
    deleted.start = 0;
    deleted.stop = 0;
    segment_list[segment_ID] = deleted;
    // Increment the next segment id
    next_head_segment_id = (next_head_segment_id + 1) % num_segments;
    Logging.log.Warn($"Delaying segment {segment_ID} to {retSegment}");
    return retSegment;
}
/// <summary>
/// True when the current load segment is ready to be read: active, in reading
/// mode, and no longer being filled.
/// </summary>
public bool LoadSegmentReady()
{
    SegmentEntry entry = segment_list[load_segment_id];
    return entry.active && entry.reading && !entry.filling;
}
/// <summary>
/// True when the current save segment can accept data: active and not being read.
/// </summary>
public bool SaveSegmentReady()
{
    SegmentEntry entry = segment_list[save_segment_id];
    return entry.active && !entry.reading;
}
/// <summary>
/// Number of bytes still unread in the current load segment.
/// </summary>
public int LoadDataBytesLeft()
{
    SegmentEntry entry = segment_list[load_segment_id];
    int total = MemoryRange(entry.start, entry.stop);
    return total - entry.current;
}
/// <summary>
/// Total byte size of the current load segment.
/// </summary>
public int LoadDataTotalBytes()
{
    SegmentEntry entry = segment_list[load_segment_id];
    return MemoryRange(entry.start, entry.stop);
}
/// <summary>
/// Parses the stream as a New Executable (NE): validates the MZ stub and NE
/// signature, then reads the target OS, segment table, resource table
/// (Windows or OS/2 variants), module references / imported names, resident
/// and non-resident name tables, and finally builds the Segments list.
/// Sets Recognized, Type, RequiredOperatingSystem and the other public
/// properties as a side effect.
/// </summary>
void Initialize()
{
    Recognized = false;
    if (BaseStream == null) { return; }
    // An NE file always starts with an MZ stub; bail out if that is absent
    // or the NE header offset points outside the stream.
    BaseExecutable = new MZ(BaseStream);
    if (!BaseExecutable.Recognized) { return; }
    if (BaseExecutable.Header.new_offset >= BaseStream.Length) { return; }
    // Read the NE header by marshalling raw bytes into the NEHeader struct.
    BaseStream.Seek(BaseExecutable.Header.new_offset, SeekOrigin.Begin);
    byte[] buffer = new byte[Marshal.SizeOf(typeof(NEHeader))];
    BaseStream.Read(buffer, 0, buffer.Length);
    IntPtr hdrPtr = Marshal.AllocHGlobal(buffer.Length);
    Marshal.Copy(buffer, 0, hdrPtr, buffer.Length);
    Header = (NEHeader)Marshal.PtrToStructure(hdrPtr, typeof(NEHeader));
    Marshal.FreeHGlobal(hdrPtr);
    if (Header.signature != SIGNATURE) { return; }
    Recognized = true;
    Type = "New Executable (NE)";
    List<string> strings = new List<string>();
    // Derive the required operating system (name/version/subsystem) from the
    // header's target OS byte and application flags.
    OperatingSystem reqOs = new OperatingSystem();
    switch (Header.target_os)
    {
        case TargetOS.OS2:
            reqOs.Name = "OS/2";
            if (Header.os_major > 0)
            {
                reqOs.MajorVersion = Header.os_major;
                reqOs.MinorVersion = Header.os_minor;
            }
            else
            {
                // No explicit version recorded; assume OS/2 1.0.
                reqOs.MajorVersion = 1;
                reqOs.MinorVersion = 0;
            }
            // Exactly one of FullScreen/GUICompatible set => console app;
            // both set => Presentation Manager app.
            if (Header.application_flags.HasFlag(ApplicationFlags.FullScreen) &&
                !Header.application_flags.HasFlag(ApplicationFlags.GUICompatible) ||
                !Header.application_flags.HasFlag(ApplicationFlags.FullScreen) &&
                Header.application_flags.HasFlag(ApplicationFlags.GUICompatible))
            {
                reqOs.Subsystem = "Console";
            }
            else if (Header.application_flags.HasFlag(ApplicationFlags.FullScreen) &&
                     Header.application_flags.HasFlag(ApplicationFlags.GUICompatible))
            {
                reqOs.Subsystem = "Presentation Manager";
            }
            break;
        case TargetOS.Windows:
        case TargetOS.Win32:
        case TargetOS.Unknown:
            reqOs.Name = "Windows";
            if (Header.os_major > 0)
            {
                reqOs.MajorVersion = Header.os_major;
                reqOs.MinorVersion = Header.os_minor;
            }
            else
            {
                // No explicit version: default by target OS code.
                switch (Header.target_os)
                {
                    case TargetOS.Windows:
                        reqOs.MajorVersion = 2;
                        reqOs.MinorVersion = 0;
                        break;
                    case TargetOS.Unknown:
                        reqOs.MajorVersion = 1;
                        reqOs.MinorVersion = 0;
                        break;
                }
            }
            break;
        case TargetOS.DOS:
        case TargetOS.Borland:
            reqOs.Name = "DOS";
            reqOs.MajorVersion = Header.os_major;
            reqOs.MinorVersion = Header.os_minor;
            if (Header.target_os == TargetOS.Borland)
            {
                reqOs.Subsystem = "Borland Operating System Services";
            }
            break;
        default:
            reqOs.Name = $"Unknown code {(byte)Header.target_os}";
            reqOs.MajorVersion = Header.os_major;
            reqOs.MinorVersion = Header.os_minor;
            break;
    }
    RequiredOperatingSystem = reqOs;
    // Read the segment table (little-endian entries) when present and in range.
    if (Header.segment_count > 0 && Header.segment_table_offset > 0 &&
        Header.segment_table_offset + BaseExecutable.Header.new_offset < BaseStream.Length)
    {
        BaseStream.Position = Header.segment_table_offset + BaseExecutable.Header.new_offset;
        segments = new SegmentEntry[Header.segment_count];
        for (int i = 0; i < segments.Length; i++)
        {
            buffer = new byte[Marshal.SizeOf(typeof(SegmentEntry))];
            BaseStream.Read(buffer, 0, buffer.Length);
            segments[i] = BigEndianMarshal.ByteArrayToStructureLittleEndian<SegmentEntry>(buffer);
        }
    }
    // Some executable indicates 0 entries, some indicate a table start and no limit,
    // will need to explore till next item: the upper limit of the resource table is
    // the smallest table offset that follows it.
    ushort resourceUpperLimit = ushort.MaxValue;
    if (Header.entry_table_offset >= Header.resource_table_offset &&
        Header.entry_table_offset <= resourceUpperLimit)
    { resourceUpperLimit = Header.entry_table_offset; }
    if (Header.segment_table_offset >= Header.resource_table_offset &&
        Header.segment_table_offset <= resourceUpperLimit)
    { resourceUpperLimit = Header.segment_table_offset; }
    if (Header.module_reference_offset >= Header.resource_table_offset &&
        Header.module_reference_offset <= resourceUpperLimit)
    { resourceUpperLimit = Header.module_reference_offset; }
    if (Header.nonresident_names_offset >= Header.resource_table_offset &&
        Header.nonresident_names_offset <= resourceUpperLimit)
    { resourceUpperLimit = (ushort)Header.nonresident_names_offset; }
    if (Header.resident_names_offset >= Header.resource_table_offset &&
        Header.resident_names_offset <= resourceUpperLimit)
    { resourceUpperLimit = Header.resident_names_offset; }
    if (Header.imported_names_offset >= Header.resource_table_offset &&
        Header.imported_names_offset <= resourceUpperLimit)
    { resourceUpperLimit = Header.imported_names_offset; }
    if (Header.resource_table_offset < resourceUpperLimit && Header.resource_table_offset != 0)
    {
        if (Header.target_os == TargetOS.Windows || Header.target_os == TargetOS.Win32 ||
            Header.target_os == TargetOS.Unknown)
        {
            // Windows-style resource table: parse, then sort resources and
            // types by name, extract version info and RT_STRING tables.
            Resources = GetResources(BaseStream, BaseExecutable.Header.new_offset,
                                     Header.resource_table_offset, resourceUpperLimit);
            for (int t = 0; t < Resources.types.Length; t++)
            {
                Resources.types[t].resources =
                    Resources.types[t].resources.OrderBy(r => r.name).ToArray();
            }
            Resources.types = Resources.types.OrderBy(t => t.name).ToArray();
            Versions = GetVersions().ToArray();
            // Collect every version string value for the Strings property.
            strings.AddRange(from v in Versions from s in v.StringsByLanguage from k in s.Value select k.Value);
            foreach (ResourceType rtype in Resources.types)
            {
                if (rtype.name != "RT_STRING") { continue; }
                strings.AddRange(GetWindowsStrings(rtype));
            }
        }
        else if (Header.target_os == TargetOS.OS2 && segments != null && Header.resource_entries > 0)
        {
            // OS/2-style resource table: each entry is a (type, name) pair of
            // UInt16s; the resource data lives in the trailing segments.
            BaseStream.Position = BaseExecutable.Header.new_offset + Header.resource_table_offset;
            buffer = new byte[Header.resource_entries * 4];
            BaseStream.Read(buffer, 0, buffer.Length);
            ResourceTableEntry[] entries = new ResourceTableEntry[Header.resource_entries];
            for (int i = 0; i < entries.Length; i++)
            {
                entries[i].etype = BitConverter.ToUInt16(buffer, i * 4 + 0);
                entries[i].ename = BitConverter.ToUInt16(buffer, i * 4 + 2);
            }
            // The last resource_entries segments hold resource data; split them
            // off so 'segments' keeps only the real code/data segments.
            SegmentEntry[] resourceSegments = new SegmentEntry[Header.resource_entries];
            Array.Copy(segments, Header.segment_count - Header.resource_entries, resourceSegments, 0,
                       Header.resource_entries);
            SegmentEntry[] realSegments = new SegmentEntry[Header.segment_count - Header.resource_entries];
            Array.Copy(segments, 0, realSegments, 0, realSegments.Length);
            segments = realSegments;
            SortedDictionary<ushort, List<Resource>> os2resources =
                new SortedDictionary<ushort, List<Resource>>();
            for (int i = 0; i < entries.Length; i++)
            {
                // Group resources by type id.
                os2resources.TryGetValue(entries[i].etype, out List<Resource> thisResourceType);
                if (thisResourceType == null) { thisResourceType = new List<Resource>(); }
                Resource thisResource = new Resource
                {
                    id = entries[i].ename,
                    name = $"{entries[i].ename}",
                    flags = (ResourceFlags)resourceSegments[i].dwFlags,
                    dataOffset = (uint)(resourceSegments[i].dwLogicalSectorOffset << Header.alignment_shift),
                    length = resourceSegments[i].dwSegmentLength
                };
                // A zero length or offset means 65536 (16-bit wraparound).
                if (thisResource.length == 0) { thisResource.length = 65536; }
                if (thisResource.dataOffset == 0) { thisResource.dataOffset = 65536; }
                // Huge segments store their length shifted down; scale it back up.
                if ((resourceSegments[i].dwFlags & (ushort)SegmentFlags.Huge) == (ushort)SegmentFlags.Huge)
                {
                    thisResource.length <<= Header.alignment_shift;
                }
                thisResource.data = new byte[thisResource.length];
                BaseStream.Position = thisResource.dataOffset;
                BaseStream.Read(thisResource.data, 0, thisResource.data.Length);
                thisResourceType.Add(thisResource);
                os2resources.Remove(entries[i].etype);
                os2resources.Add(entries[i].etype, thisResourceType);
            }
            if (os2resources.Count > 0)
            {
                // Flatten the per-type groups into the ResourceTable structure.
                Resources = new ResourceTable();
                int counter = 0;
                Resources.types = new ResourceType[os2resources.Count];
                foreach (KeyValuePair<ushort, List<Resource>> kvp in os2resources)
                {
                    Resources.types[counter].count = (ushort)kvp.Value.Count;
                    Resources.types[counter].id = kvp.Key;
                    Resources.types[counter].name = Os2.Resources.IdToName(kvp.Key);
                    Resources.types[counter].resources = kvp.Value.OrderBy(r => r.id).ToArray();
                    counter++;
                }
                foreach (ResourceType rtype in Resources.types)
                {
                    if (rtype.name != "RT_STRING") { continue; }
                    strings.AddRange(GetOs2Strings(rtype));
                }
            }
        }
    }
    // Find the upper limit of the module reference table the same way.
    resourceUpperLimit = ushort.MaxValue;
    if (Header.entry_table_offset >= Header.module_reference_offset &&
        Header.entry_table_offset <= resourceUpperLimit)
    { resourceUpperLimit = Header.entry_table_offset; }
    if (Header.segment_table_offset >= Header.module_reference_offset &&
        Header.segment_table_offset <= resourceUpperLimit)
    { resourceUpperLimit = Header.segment_table_offset; }
    if (Header.resource_table_offset >= Header.module_reference_offset &&
        Header.resource_table_offset <= resourceUpperLimit)
    { resourceUpperLimit = Header.resource_table_offset; }
    if (Header.nonresident_names_offset >= Header.module_reference_offset &&
        Header.nonresident_names_offset <= resourceUpperLimit)
    { resourceUpperLimit = (ushort)Header.nonresident_names_offset; }
    if (Header.imported_names_offset >= Header.module_reference_offset &&
        Header.imported_names_offset <= resourceUpperLimit)
    { resourceUpperLimit = Header.imported_names_offset; }
    if (Header.module_reference_offset < resourceUpperLimit && Header.module_reference_offset != 0 &&
        Header.reference_count > 0)
    {
        // Module reference table: each entry is an offset into the imported
        // names table, whose entries are length-prefixed ASCII strings.
        short[] referenceOffsets = new short[Header.reference_count];
        buffer = new byte[2];
        BaseStream.Position = Header.module_reference_offset + BaseExecutable.Header.new_offset;
        for (int i = 0; i < Header.reference_count; i++)
        {
            BaseStream.Read(buffer, 0, 2);
            referenceOffsets[i] = BitConverter.ToInt16(buffer, 0);
        }
        ImportedNames = new string[Header.reference_count];
        for (int i = 0; i < Header.reference_count; i++)
        {
            BaseStream.Position = Header.imported_names_offset + BaseExecutable.Header.new_offset +
                                  referenceOffsets[i];
            int len = BaseStream.ReadByte();
            buffer = new byte[len];
            BaseStream.Read(buffer, 0, len);
            ImportedNames[i] = Encoding.ASCII.GetString(buffer);
        }
    }
    // Find the upper limit of the resident names table the same way.
    resourceUpperLimit = ushort.MaxValue;
    if (Header.entry_table_offset >= Header.resident_names_offset &&
        Header.entry_table_offset <= resourceUpperLimit)
    { resourceUpperLimit = Header.entry_table_offset; }
    if (Header.segment_table_offset >= Header.resident_names_offset &&
        Header.segment_table_offset <= resourceUpperLimit)
    { resourceUpperLimit = Header.segment_table_offset; }
    if (Header.module_reference_offset >= Header.resident_names_offset &&
        Header.module_reference_offset <= resourceUpperLimit)
    { resourceUpperLimit = Header.module_reference_offset; }
    if (Header.nonresident_names_offset >= Header.resident_names_offset &&
        Header.nonresident_names_offset <= resourceUpperLimit)
    { resourceUpperLimit = (ushort)Header.nonresident_names_offset; }
    if (Header.imported_names_offset >= Header.resident_names_offset &&
        Header.imported_names_offset <= resourceUpperLimit)
    { resourceUpperLimit = Header.imported_names_offset; }
    if (Header.resident_names_offset < resourceUpperLimit && Header.resident_names_offset != 0)
    {
        // The first resident name is the module name; the rest stay in ResidentNames.
        ResidentNames = GetResidentStrings(BaseStream, BaseExecutable.Header.new_offset,
                                           Header.resident_names_offset, resourceUpperLimit);
        if (ResidentNames.Length >= 1)
        {
            ModuleName = ResidentNames[0].name;
            if (ResidentNames.Length > 1)
            {
                ResidentName[] newResidentNames = new ResidentName[ResidentNames.Length - 1];
                Array.Copy(ResidentNames, 1, newResidentNames, 0, ResidentNames.Length - 1);
                ResidentNames = newResidentNames;
            }
            else { ResidentNames = null; }
        }
    }
    if (Header.nonresident_table_size > 0)
    {
        // The first non-resident name is the module description; the rest stay
        // in NonResidentNames. Note the offset here is absolute, not relative
        // to the NE header.
        NonResidentNames = GetResidentStrings(BaseStream, Header.nonresident_names_offset, 0,
                                              (ushort)(Header.nonresident_names_offset +
                                                       Header.nonresident_table_size));
        if (NonResidentNames.Length >= 1)
        {
            ModuleDescription = NonResidentNames[0].name;
            if (NonResidentNames.Length > 1)
            {
                ResidentName[] newNonResidentNames = new ResidentName[NonResidentNames.Length - 1];
                Array.Copy(NonResidentNames, 1, newNonResidentNames, 0, NonResidentNames.Length - 1);
                NonResidentNames = newNonResidentNames;
            }
            else { NonResidentNames = null; }
        }
    }
    // Publish all collected strings, de-duplicated and sorted.
    if (!string.IsNullOrEmpty(ModuleName)) { strings.Add(ModuleName); }
    if (!string.IsNullOrEmpty(ModuleDescription)) { strings.Add(ModuleDescription); }
    if (strings.Count > 0) { Strings = strings.Distinct().OrderBy(s => s); }
    if (segments == null) { return; }
    // Build the public Segments view from the raw segment table.
    List<Segment> libsegs = new List<Segment>();
    foreach (SegmentEntry seg in segments)
    {
        Segment libseg = new Segment
        {
            Flags = $"{(SegmentFlags)(seg.dwFlags & SEGMENT_FLAGS_MASK)}",
            Name = (SegmentType)(seg.dwFlags & SEGMENT_TYPE_MASK) == SegmentType.Code ? ".text" : ".data",
            Offset = seg.dwLogicalSectorOffset << Header.alignment_shift,
            Size = seg.dwSegmentLength
        };
        // Huge OS/2 segments store their size shifted down; scale it back up.
        if (Header.target_os == TargetOS.OS2 &&
            (seg.dwFlags & (int)SegmentFlags.Huge) == (int)SegmentFlags.Huge)
        {
            libseg.Size <<= Header.alignment_shift;
        }
        libsegs.Add(libseg);
    }
    Segments = libsegs.OrderBy(s => s.Offset).ToArray();
}
/// <summary>
/// Bytes remaining in the given segment according to its progress counter.
/// NOTE(review): 'current' is a segment-relative counter while 'stop' is an
/// absolute ring address — confirm MemoryRange is meant to mix the two here.
/// </summary>
public int SegmentBytesLeft(int segment_ID)
{
    SegmentEntry entry = segment_list[segment_ID];
    return MemoryRange(entry.current, entry.stop);
}