/*
 * Find the memory region that contains the given address.
 *
 * Walks the singly-linked region list starting at Head and returns the
 * first region whose half-open interval [StartAddress, StartAddress + Size)
 * contains `address`, or null if the address is unmapped.
 *
 * Fix: the first Ensures clause was duplicated verbatim; the redundant
 * copy has been removed.
 */
public MemoryRegion Find(Pointer address)
{
    Contract.Ensures(Contract.Result<MemoryRegion>() == null || Contract.Result<MemoryRegion>().GhostOwner == GhostOwner);
    Contract.Ensures(Contract.Result<MemoryRegion>() == null || Contract.Result<MemoryRegion>().BackingFile == null || Contract.Result<MemoryRegion>().BackingFile.GhostOwner == GhostOwner);

    var h = Head;
    while (h != null)
    {
        // Object invariant of h
        // To be supported in next release of code contract
        // See http://social.msdn.microsoft.com/Forums/en-US/codecontracts/thread/17f9af7a-849f-4c91-93b4-95a98763d080
        // Contract.Assume(h.BackingFile == null || h.BackingFile.GhostOwner == h.GhostOwner);
        //
        // Property of the container
        // Contract.Assume(h.GhostOwner == GhostOwner);

        // Half-open interval membership test.
        if (h.StartAddress <= address && address < h.StartAddress + h.Size)
        {
            return h;
        }
        h = h.Next;
    }
    return null;
}
/*
 * Create a new memory region in the address space.
 *
 * Any portion of the current address space that overlaps the requested
 * range is unmapped first, and only then is the new region inserted.
 * Several clients, including the dynamic linker, rely on this mmap(2)-style
 * replace-on-overlap behavior.
 *
 * Both vaddr and memorySize must be aligned to the page boundary.
 */
internal int AddMapping(uint access, int flags, File file, uint fileOffset, int fileSize, Pointer vaddr, int memorySize)
{
    Contract.Requires(file == null || file.GhostOwner == GhostOwner);
    Contract.Requires(0 <= fileSize && fileSize <= memorySize);
    Contract.Requires(file == null || fileSize > 0);
    Contract.Requires(file != null || (fileSize == 0 && fileOffset == 0));
    Contract.Ensures(Brk == Contract.OldValue(Brk));

    // Reject a non-positive or non-page-aligned size.
    if (memorySize <= 0 || Arch.ArchDefinition.PageOffset(memorySize) != 0)
    {
        return -ErrorCode.EINVAL;
    }

    // Reject a non-page-aligned start address.
    if (Arch.ArchDefinition.PageOffset(vaddr.ToUInt32()) != 0)
    {
        return -ErrorCode.EINVAL;
    }

    // Unmap whatever currently overlaps the requested range.
    var ret = RemoveMapping(vaddr, memorySize);
    if (ret != 0)
    {
        return ret;
    }

    Insert(new MemoryRegion(GhostOwner, access, flags, file, fileOffset, fileSize, vaddr, memorySize, false));
    return 0;
}
/*
 * Debug helper: dump the first `size` bytes at `buf` to the console as
 * 32-bit words, eight words per row. Rounds the byte count up to whole words.
 */
public static unsafe void DumpBuf(Pointer buf, int size)
{
    Arch.Console.Write("Dump: size=");
    Arch.Console.Write(size);
    Arch.Console.WriteLine();

    var words = (size + 3) / 4;
    var p = (int*)buf.ToPointer();
    for (var i = 0; i < words; ++i)
    {
        Arch.Console.Write(p[i]);
        Arch.Console.Write(' ');
        // Start a new row after every eighth word.
        if ((i + 1) % 8 == 0)
        {
            Arch.Console.WriteLine();
        }
    }
    Arch.Console.WriteLine();
    Arch.Console.WriteLine();
}
// Copy the binder "read buffer" produced by the Linux helper back into the
// calling process: unmarshal embedded data entries, validate the reported
// size, patch pointers / file descriptors so they are valid in this process,
// then write the result through `readBuffer`.
// Returns 0 on success or a negative errno.
// NOTE(review): `readBufferSize` is currently unused — confirm whether the
// bounds check should use it in addition to completionBuf.Length.
public static int UnmarshalReadBuffer(Thread thr, ByteBufferRef completionBuf, ref sys_binder_write_desc desc, UserPtr readBuffer, int readBufferSize)
{
    var proc = thr.Parent;
    var marshaledPtr = new Pointer(completionBuf.Location);
    //Arch.Console.Write("read_consumed:");
    //Arch.Console.Write(desc.read_consumed);
    //BinderIPCMarshaler.DumpBuf(new Pointer(completionBuf.Location), (int)desc.read_consumed);

    // The binder VM area must have been mapped (see mmap2's binder hack)
    // before a read buffer can be delivered into it.
    if (proc.binderVMStart == UserPtr.Zero)
    {
        Arch.Console.WriteLine("proc.binderVMStart == UserPtr.Zero");
        return -ErrorCode.EFAULT;
    }

    if (UnmarshalDataEntries(thr, completionBuf, ref desc) != 0)
    {
        Arch.Console.WriteLine("UnmarshalDataEntries failed");
        return -ErrorCode.ENOMEM;
    }

    // Sanity-check the helper-reported consumed size against the buffer.
    if (desc.read_consumed > completionBuf.Length)
    {
        Arch.Console.WriteLine("UnmarshalReadBuffer: bad input");
        return -ErrorCode.ENOMEM;
    }

    // Patch pointers and convert file descriptors
    var b = completionBuf.Slice(0, desc.read_consumed);
    if (PatchReadBuffer(thr, b) != 0)
    {
        Arch.Console.WriteLine("Failed to patch read buffer");
        return -ErrorCode.EINVAL;
    }

    if (readBuffer.Write(thr, marshaledPtr, desc.read_consumed) != 0)
    {
        Arch.Console.WriteLine("readBuffer.Write failed");
        return -ErrorCode.ENOMEM;
    }
    return 0;
}
/*
 * Construct a memory region owned by `owner`.
 * Takes a reference on the backing file's inode (if any) so the file data
 * remains available for demand paging while this region exists.
 */
internal MemoryRegion(Process owner, uint access, int flags, File file, uint fileOffset, int fileSize, Pointer vaddr, int size, bool isFixed)
{
    Contract.Requires(file == null || file.GhostOwner == owner);
    Contract.Ensures(GhostOwner == owner);

    // Ownership and protection.
    GhostOwner = owner;
    Access = access;
    Flags = flags;
    IsFixed = isFixed;

    // File backing (null for anonymous mappings).
    BackingFile = file;
    FileOffset = fileOffset;
    FileSize = fileSize;

    // Placement in the address space; not yet linked into any list.
    StartAddress = vaddr;
    Size = size;
    Next = null;

    // Pin the inode for the lifetime of the region.
    if (file != null)
    {
        file.inode.IncreaseRefCount();
    }
}
// Construct a new process for the given Android application.
// Builds the file-descriptor table, the architecture-specific address space
// (with the UTCB area reserved at a fixed virtual address), and the security
// credential, all owned by this process.
internal Process(ASCIIString name, AndroidApplicationInfo appInfo)
{
    Contract.Ensures(Credential.GhostOwner == this);
    Contract.Ensures(Space.GhostOwner == this);
    Contract.Ensures(Files.GhostOwner == this);

    this.AppInfo = appInfo;
    // Prefix used to locate this app's files in the secure file system.
    SFSFilePrefix = Util.StringToByteArray(appInfo.DataDir, false);
    this.Files = new FileDescriptorTable(this);

    // Fixed base of the UTCB (user thread control block) area; the size
    // covers the per-task maximum number of threads.
    const uint UTCB_BOTTOM = 0xc0000000;
    var utcb = new Pointer(UTCB_BOTTOM);

    // TODO: expose interface to change the name of the process
    var archAddressSpace = Arch.ArchAddressSpace.Create(name, utcb, Arch.ArchDefinition.UTCBSizeShift + Arch.ArchDefinition.MaxThreadPerTaskLog2);
    this.Space = new AddressSpace(this, archAddressSpace);
    this.Credential = SecurityManager.GetCredential(name, this);
}
// Native seL4 binding: create a page allocator managing [start, end).
// Returns an opaque handle used by the other sel4_alloc_* calls.
internal static extern IntPtr sel4_alloc_new(Pointer start, Pointer end);
// Native seL4 binding: return `size` bytes starting at `page` to the
// allocator identified by `handle`.
internal static extern void sel4_alloc_free(IntPtr handle, Pointer page, int size);
/*
 * Initialize the allocator to manage `num_of_pages` pages starting at `start`.
 *
 * Fix: the end of the range was previously computed twice with different
 * expressions (`num_of_pages * PageSize` for the native allocator and
 * `num_of_pages << PageShift` for End). Compute it once so the native
 * allocator and the managed Start/End bookkeeping always agree.
 * (Assumes PageSize == 1 << PageShift, as the original code implied.)
 */
public void Initialize(Pointer start, int num_of_pages)
{
    var end = start + (num_of_pages << Arch.ArchDefinition.PageShift);
    this.handle = NativeMethods.sel4_alloc_new(start, end);
    this.Start = start;
    this.End = end;
}
/*
 * Create a new memory region in the address space.
 *
 * If there are any overlaps between the current address space and the requested one,
 * this function will unmap the overlapped portions of the address space before
 * mapping in the new memory region.
 *
 * Several clients, including the dynamic linker, rely on this feature. See mmap(2)
 * for details.
 *
 * This function requires vaddr and memorySize to be aligned to the page boundary.
 */
internal int AddMapping(uint access, int flags, File file, uint fileOffset, int fileSize, Pointer vaddr, int memorySize)
{
    Contract.Requires(file == null || file.GhostOwner == GhostOwner);
    Contract.Requires(0 <= fileSize && fileSize <= memorySize);
    Contract.Requires(file == null || fileSize > 0);
    Contract.Requires(file != null || (fileSize == 0 && fileOffset == 0));
    Contract.Ensures(Brk == Contract.OldValue(Brk));

    // Size must be positive and page-aligned.
    if (memorySize <= 0 || Arch.ArchDefinition.PageOffset(memorySize) != 0)
    {
        return(-ErrorCode.EINVAL);
    }

    // Start address must be page-aligned.
    var diff = Arch.ArchDefinition.PageOffset(vaddr.ToUInt32());
    if (diff != 0)
    {
        return(-ErrorCode.EINVAL);
    }

    // Unmap any overlapping portion of the existing address space first.
    var r = RemoveMapping(vaddr, memorySize);
    if (r != 0)
    {
        return(r);
    }

    var newRegion = new MemoryRegion(GhostOwner, access, flags, file, fileOffset, fileSize, vaddr, memorySize, false);
    Insert(newRegion);
    return(0);
}
// Resolve a page fault for `process` at `faultAddress`.
// On success, outputs the backing physical page and the fault permission
// mask; on failure, outputs Pointer.Zero and no permission.
// NOTE(review): "FALUT_NONE" is presumably a typo of FAULT_NONE; it is
// declared elsewhere, so renaming it requires touching its declaration.
public static void HandlePageFault(Process process, uint faultType, Pointer faultAddress, Pointer faultIP, out Pointer physicalPage, out uint permission)
{
    // Object invariants of Process
    Contract.Assume(process.Space.GhostOwner == process);
    Contract.Assume(process.Space.Head.GhostOwner == process);

    // Never map page 0
    if (faultAddress.ToUInt32() < Arch.ArchDefinition.PageSize)
    {
        physicalPage = Pointer.Zero;
        permission = MemoryRegion.FALUT_NONE;
        return;
    }

    SyscallProfiler.EnterPageFault();
    var space = process.Space;
    var region = space.Find(faultAddress);
    // Only service the fault if a region exists and its access rights
    // permit the faulting access type.
    if (region != null && (faultType & region.Access) != 0)
    {
        /*
         * Check whether the kernel has allocated a page for this address,
         * which might be the case due to UserPtr.Read() / UserPtr.Write().
         */
        var mapped_in_page = space.UserToVirt(new UserPtr(faultAddress));
        if (mapped_in_page != Pointer.Zero)
        {
            physicalPage = PageIndex(mapped_in_page);
            permission = region.Access & MemoryRegion.FAULT_MASK;
            return;
        }

        // If the page is a shared page from Linux, we'll need to ask Linux to grab the page.
        // Otherwise let's just allocate a fresh one.
        var shared_memory_region = IsAlienSharedRegion(region);
        var ghost_page_from_fresh_memory = false;
        ByteBufferRef buf;
        if (shared_memory_region)
        {
            // Ask the Linux helper (shadow process) for the shared page.
            buf = Globals.LinuxMemoryAllocator.GetUserPage(process, faultType, ToShadowProcessAddress(faultAddress, region));
            if (!buf.isValid)
            {
                Arch.Console.WriteLine("pager: cannot map in alien page.");
                space.DumpAll();
                physicalPage = Pointer.Zero;
                permission = MemoryRegion.FALUT_NONE;
                return;
            }
        }
        else
        {
            buf = Globals.PageAllocator.AllocPage();
            ghost_page_from_fresh_memory = true;
            if (!buf.isValid)
            {
                Arch.Console.WriteLine("Cannot allocate new pages");
                Utils.Panic();
            }

            if (region.BackingFile != null)
            {
                // Demand-page from the backing file: compute the file offset
                // of this page and clamp the read size to [0, PageSize].
                var rel_pos = (PageIndex(faultAddress) - region.StartAddress);
                uint pos = (uint)((ulong)rel_pos + region.FileOffset);
                var readSizeLong = region.FileSize - rel_pos;
                if (readSizeLong < 0) readSizeLong = 0;
                else if (readSizeLong > Arch.ArchDefinition.PageSize) readSizeLong = Arch.ArchDefinition.PageSize;
                int readSize = (int)readSizeLong;
                Contract.Assert(region.BackingFile.GhostOwner == process);
                var r = region.BackingFile.Read(buf, 0, readSize, ref pos);
                if (r < 0) r = 0;
                Utils.Assert(r <= Arch.ArchDefinition.PageSize);
                // Zero-fill the tail beyond what the file provided (BSS-style).
                if (r < Arch.ArchDefinition.PageSize) buf.ClearAfter(r);
            }
            else
            {
                // Anonymous mapping: hand out a zeroed page.
                buf.Clear();
            }
        }

        // Exactly one of the two allocation paths must have been taken.
        Contract.Assert(shared_memory_region ^ ghost_page_from_fresh_memory);
        var page = new Pointer(buf.Location);
        space.AddIntoWorkingSet(new UserPtr(PageIndex(faultAddress)), page);
        SyscallProfiler.ExitPageFault();
        physicalPage = page;
        permission = region.Access & MemoryRegion.FAULT_MASK;
        return;
    }
    else
    {
        /*
         * TODO: mmap2 enables the application to request an automatically expandable
         * region (e.g., a stack)
         *
         * The feature doesn't seem to be actively used by the applications, since
         * both the C runtime and the pthread library initialize stacks explicitly.
         *
         * The feature is currently unimplemented.
         */
    }
    physicalPage = Pointer.Zero;
    permission = MemoryRegion.FALUT_NONE;
    return;
}
// Remove the regions fully contained in [vaddr, vaddr + size) that follow
// `prev`. On return, ret is:
//   -EINVAL — a fixed region lies inside the range (cannot be removed);
//    0      — nothing left overlapping the range; removal is complete;
//    1      — a region straddles the end of the range (caller must trim it).
// `changed` reports whether any node was actually unlinked.
private void RemoveMappingCenter(Pointer vaddr, int size, ref MemoryRegion prev, out int ret, out bool changed)
{
    Contract.Ensures(Brk == Contract.OldValue(Brk));
    changed = false;
    var r = prev.Next;
    var end = vaddr + size;

    // Unlink every non-fixed region that ends within the range.
    while (r != null && !r.IsFixed && r.End <= end)
    {
        changed = true;
        RemoveNode(prev, r);
        r = prev.Next;
    }

    // A fixed region inside the range blocks the whole removal.
    if (r != null && r.End <= end && r.IsFixed)
    {
        ret = -ErrorCode.EINVAL;
        return;
    }

    // No region left overlapping the range: done.
    if (r == null || r.StartAddress >= end)
    {
        ret = 0;
        return;
    }

    // The remaining region extends past `end`; caller trims its left part.
    ret = 1;
    return;
}
// Set the access rights of every region overlapping [start, start + size)
// to `access`, splitting regions at the range boundaries as needed and
// merging adjacent regions with equal rights afterwards.
// Returns false if `start` is not page-aligned or a fixed region would be
// affected; true otherwise.
internal bool UpdateAccessRightRange(Pointer start, int size, uint access)
{
    if (Arch.ArchDefinition.PageOffset(start.ToUInt32()) != 0) return false;

    var prev = Head;
    var r = prev.Next;
    var end = start + size;
    while (r != null && r.StartAddress < end)
    {
        // Skip regions outside the range or already holding the target rights.
        if (!r.OverlappedInt(start, size) || access == r.Access)
        {
            prev = r;
            r = r.Next;
        }
        else
        {
            // Fixed regions cannot have their rights changed.
            if (r.IsFixed)
            {
                return false;
            }
            if (r.StartAddress < start)
            {
                // Region starts before the range: split off the untouched left part.
                var region_end = r.End;
                var middleRegion = Split(r, start - r.StartAddress);
                if (end < region_end)
                {
                    // update middle region
                    prev = Split(middleRegion, size);
                    UpdateAccessRights(middleRegion, access);
                    return true;
                }
                else
                {
                    UpdateAccessRights(middleRegion, access);
                    TryMergeWithNext(middleRegion);
                    prev = middleRegion;
                    r = prev.Next;
                }
            }
            else
            {
                if (r.End <= end)
                {
                    // Region lies entirely inside the range.
                    UpdateAccessRights(r, access);
                    TryMergeWithNext(r);
                    var merged = TryMergeWithNext(prev);
                    if (merged)
                    {
                        r = prev.Next;
                    }
                    else
                    {
                        prev = prev.Next;
                        r = prev.Next;
                    }
                }
                else
                {
                    // Region extends past the range: split at `end`, update the left half.
                    var right_region = Split(r, end - r.StartAddress);
                    UpdateAccessRights(r, access);
                    // NOTE(review): merge result is intentionally unused here;
                    // iteration resumes after the right-hand split.
                    var merged = TryMergeWithNext(prev);
                    prev = right_region;
                    r = prev.Next;
                }
            }
        }
    }
    return true;
}
// Patch a binder transaction received from the Linux helper so it is valid
// inside this process: rebase the data/offsets pointers onto the process's
// binder VM area, then walk the flat_binder_object table and translate
// embedded Linux file descriptors into local ones.
// Returns 0 on success, -1 on any read failure.
private static int PatchReadTransaction(Thread current, Pointer marshaledBufferStart, ref binder_transaction_data tr)
{
    // Rebase buffer pointers from helper-relative to this process's binder VM area.
    if (tr.data_size != 0) tr.data_buffer += current.Parent.binderVMStart.Value.ToUInt32();
    if (tr.offsets_size != 0) tr.data_offsets += current.Parent.binderVMStart.Value.ToUInt32();

    // Window-focus transactions change which process is "active" for security.
    if (GainingWindowFocus(current, tr)) Globals.SecurityManager.OnActiveProcessChanged(current.Parent);

    // Each entry in the offsets table points at a flat_binder_object
    // embedded in the data buffer (offsets are 4 bytes each).
    for (var off_ptr = tr.data_offsets; off_ptr < tr.data_offsets + tr.offsets_size; off_ptr += 4)
    {
        var fp = new flat_binder_object();
        int off;
        if (off_ptr.Read(current, out off) != 0)
        {
            Arch.Console.Write("Can't get offset");
            return -1;
        }
        var fp_ptr = tr.data_buffer + off;
        if (fp_ptr.Read(current, out fp) != 0)
        {
            Arch.Console.Write("Read fp failed, ptr:");
            Arch.Console.Write(fp_ptr.Value.ToInt32());
            Arch.Console.WriteLine();
            return -1;
        }
        //Arch.Console.Write("off_ptr:");
        //Arch.Console.Write(tr->data_offsets.Value.ToUInt32());
        //Arch.Console.Write(" Off end:");
        //Arch.Console.Write((tr->data_offsets + tr->offsets_size).Value.ToUInt32());
        //Arch.Console.WriteLine();
        switch (fp.type)
        {
            case BinderINode.BINDER_TYPE_FD:
                {
                    // Translate the helper-side fd into a local fd backed by
                    // a shared-memory inode, then patch it into the buffer.
                    var proc = current.Parent;
                    var linux_fd = fp.binderOrHandle.Value.ToInt32();
                    GenericINode inode;
                    if (IsScreenSharingTransaction(current, ref tr))
                        inode = new ScreenBufferINode(linux_fd, proc.helperPid);
                    else
                        inode = new BinderSharedINode(linux_fd, proc.helperPid);
                    int fd = proc.GetUnusedFd();
                    proc.InstallFd(fd, new File(proc, inode, FileSystem.O_RDWR, 0));
                    // Patch the data structure
                    (fp_ptr + flat_binder_object.OFFSET_OF_HANDLE).Write(current, fd);
                }
                break;

            case BinderINode.BINDER_TYPE_HANDLE:
                // silently ignore it (it seems safe)
                break;

            default:
                Arch.Console.Write("BinderINode::PatchReadTransaction ignoring ");
                Arch.Console.Write(fp.type);
                Arch.Console.WriteLine();
                break;
        }
    }
    return 0;
}
/*
 * Return whether any region in the address space overlaps the range
 * [targetAddr, targetAddr + length).
 */
internal bool ContainRegion(Pointer targetAddr, int length)
{
    for (var region = Head; region != null; region = region.Next)
    {
        if (region.OverlappedInt(targetAddr, length))
        {
            return true;
        }
    }
    return false;
}
// Record that the user page at `userPtr` is currently backed by the kernel
// virtual address `virtualAddr`.
public void AddIntoWorkingSet(UserPtr userPtr, Pointer virtualAddr)
{
    workingSet.Add(userPtr, virtualAddr);
}
// Drop the working-set entries covering the user range [vaddr, vaddr + size).
private void RemoveWorkingSet(Pointer vaddr, int size)
{
    workingSet.Remove(this, new UserPtr(vaddr), new UserPtr(vaddr) + size);
}
// Resolve a page fault for `process` at `faultAddress`.
// On success, outputs the backing physical page and the fault permission
// mask; on failure, outputs Pointer.Zero and no permission.
// NOTE(review): "FALUT_NONE" is presumably a typo of FAULT_NONE; it is
// declared elsewhere, so renaming it requires touching its declaration.
public static void HandlePageFault(Process process, uint faultType, Pointer faultAddress, Pointer faultIP, out Pointer physicalPage, out uint permission)
{
    // Object invariants of Process
    Contract.Assume(process.Space.GhostOwner == process);
    Contract.Assume(process.Space.Head.GhostOwner == process);

    // Never map page 0
    if (faultAddress.ToUInt32() < Arch.ArchDefinition.PageSize)
    {
        physicalPage = Pointer.Zero;
        permission = MemoryRegion.FALUT_NONE;
        return;
    }

    SyscallProfiler.EnterPageFault();
    var space = process.Space;
    var region = space.Find(faultAddress);
    // Only service the fault if a region exists and its access rights
    // permit the faulting access type.
    if (region != null && (faultType & region.Access) != 0)
    {
        /*
         * Check whether the kernel has allocated a page for this address,
         * which might be the case due to UserPtr.Read() / UserPtr.Write().
         */
        var mapped_in_page = space.UserToVirt(new UserPtr(faultAddress));
        if (mapped_in_page != Pointer.Zero)
        {
            physicalPage = PageIndex(mapped_in_page);
            permission = region.Access & MemoryRegion.FAULT_MASK;
            return;
        }

        // If the page is a shared page from Linux, we'll need to ask Linux to grab the page.
        // Otherwise let's just allocate a fresh one.
        var shared_memory_region = IsAlienSharedRegion(region);
        var ghost_page_from_fresh_memory = false;
        ByteBufferRef buf;
        if (shared_memory_region)
        {
            // Ask the Linux helper (shadow process) for the shared page.
            buf = Globals.LinuxMemoryAllocator.GetUserPage(process, faultType, ToShadowProcessAddress(faultAddress, region));
            if (!buf.isValid)
            {
                Arch.Console.WriteLine("pager: cannot map in alien page.");
                space.DumpAll();
                physicalPage = Pointer.Zero;
                permission = MemoryRegion.FALUT_NONE;
                return;
            }
        }
        else
        {
            buf = Globals.PageAllocator.AllocPage();
            ghost_page_from_fresh_memory = true;
            if (!buf.isValid)
            {
                Arch.Console.WriteLine("Cannot allocate new pages");
                Utils.Panic();
            }

            if (region.BackingFile != null)
            {
                // Demand-page from the backing file: compute the file offset
                // of this page and clamp the read size to [0, PageSize].
                var rel_pos = (PageIndex(faultAddress) - region.StartAddress);
                uint pos = (uint)((ulong)rel_pos + region.FileOffset);
                var readSizeLong = region.FileSize - rel_pos;
                if (readSizeLong < 0)
                {
                    readSizeLong = 0;
                }
                else if (readSizeLong > Arch.ArchDefinition.PageSize)
                {
                    readSizeLong = Arch.ArchDefinition.PageSize;
                }

                int readSize = (int)readSizeLong;
                Contract.Assert(region.BackingFile.GhostOwner == process);
                var r = region.BackingFile.Read(buf, 0, readSize, ref pos);
                if (r < 0)
                {
                    r = 0;
                }

                Utils.Assert(r <= Arch.ArchDefinition.PageSize);
                // Zero-fill the tail beyond what the file provided (BSS-style).
                if (r < Arch.ArchDefinition.PageSize)
                {
                    buf.ClearAfter(r);
                }
            }
            else
            {
                // Anonymous mapping: hand out a zeroed page.
                buf.Clear();
            }
        }

        // Exactly one of the two allocation paths must have been taken.
        Contract.Assert(shared_memory_region ^ ghost_page_from_fresh_memory);
        var page = new Pointer(buf.Location);
        space.AddIntoWorkingSet(new UserPtr(PageIndex(faultAddress)), page);
        SyscallProfiler.ExitPageFault();
        physicalPage = page;
        permission = region.Access & MemoryRegion.FAULT_MASK;
        return;
    }
    else
    {
        /*
         * TODO: mmap2 enables the application to request an automatically expandable
         * region (e.g., a stack)
         *
         * The feature doesn't seem to be actively used by the applications, since
         * both the C runtime and the pthread library initialize stacks explicitly.
         *
         * The feature is currently unimplemented.
         */
    }
    physicalPage = Pointer.Zero;
    permission = MemoryRegion.FALUT_NONE;
    return;
}
/* Round an address down to the start of its page. */
public static Pointer PageIndex(Pointer addr)
{
    return addr & Arch.ArchDefinition.PageIndexMask;
}
// Set the access rights of every region overlapping [start, start + size)
// to `access`, splitting regions at the range boundaries as needed and
// merging adjacent regions with equal rights afterwards.
// Returns false if `start` is not page-aligned or a fixed region would be
// affected; true otherwise.
internal bool UpdateAccessRightRange(Pointer start, int size, uint access)
{
    if (Arch.ArchDefinition.PageOffset(start.ToUInt32()) != 0)
    {
        return(false);
    }

    var prev = Head;
    var r = prev.Next;
    var end = start + size;
    while (r != null && r.StartAddress < end)
    {
        // Skip regions outside the range or already holding the target rights.
        if (!r.OverlappedInt(start, size) || access == r.Access)
        {
            prev = r;
            r = r.Next;
        }
        else
        {
            // Fixed regions cannot have their rights changed.
            if (r.IsFixed)
            {
                return(false);
            }
            if (r.StartAddress < start)
            {
                // Region starts before the range: split off the untouched left part.
                var region_end = r.End;
                var middleRegion = Split(r, start - r.StartAddress);
                if (end < region_end)
                {
                    // update middle region
                    prev = Split(middleRegion, size);
                    UpdateAccessRights(middleRegion, access);
                    return(true);
                }
                else
                {
                    UpdateAccessRights(middleRegion, access);
                    TryMergeWithNext(middleRegion);
                    prev = middleRegion;
                    r = prev.Next;
                }
            }
            else
            {
                if (r.End <= end)
                {
                    // Region lies entirely inside the range.
                    UpdateAccessRights(r, access);
                    TryMergeWithNext(r);
                    var merged = TryMergeWithNext(prev);
                    if (merged)
                    {
                        r = prev.Next;
                    }
                    else
                    {
                        prev = prev.Next;
                        r = prev.Next;
                    }
                }
                else
                {
                    // Region extends past the range: split at `end`, update the left half.
                    var right_region = Split(r, end - r.StartAddress);
                    UpdateAccessRights(r, access);
                    // NOTE(review): merge result is intentionally unused here;
                    // iteration resumes after the right-hand split.
                    var merged = TryMergeWithNext(prev);
                    prev = right_region;
                    r = prev.Next;
                }
            }
        }
    }
    return(true);
}
/*
 * Write `length` bytes from kernel memory at `kernelPointer` into this
 * user pointer, within the calling thread's process. Delegates to the
 * Process-based overload.
 */
public int Write(Thread current, Pointer kernelPointer, int length)
{
    var proc = current.Parent;
    return Write(proc, kernelPointer, length);
}
// Implementation of the mmap2 system call. Chooses (or validates) a target
// address, resolves the backing file for non-anonymous mappings, forwards
// Linux-shared mappings to the shadow process, and installs the mapping.
// Returns the mapped address on success or a negative errno.
public static int mmap2(Thread current, UserPtr addr, int length, int prot, int flags, int fd, int pgoffset)
{
    Contract.Requires(current.Parent == current.Parent.Space.GhostOwner);
    Pointer targetAddr = new Pointer(Arch.ArchDefinition.PageAlign(addr.Value.ToUInt32()));
    //Arch.Console.Write("mmap2:");
    //Arch.Console.Write(addr.Value.ToInt32());
    //Arch.Console.Write(" sz=");
    //Arch.Console.Write(length);
    //Arch.Console.Write(" prot=");
    //Arch.Console.Write(prot);
    //Arch.Console.WriteLine();
    var proc = current.Parent;
    var space = proc.Space;
    Contract.Assert(proc == space.GhostOwner);

    // Without MAP_FIXED the hint is advisory: pick a free region if the
    // hint is zero or already occupied. With MAP_FIXED the address must
    // be page-aligned.
    if ((flags & MAP_FIXED) == 0)
    {
        if (targetAddr == Pointer.Zero || space.ContainRegion(targetAddr, length)) targetAddr = space.FindFreeRegion(length);
    }
    else if (Arch.ArchDefinition.PageOffset(addr.Value.ToInt32()) != 0)
    {
        return -ErrorCode.EINVAL;
    }

    targetAddr = new Pointer(Arch.ArchDefinition.PageIndex(targetAddr.ToUInt32()));
    if (targetAddr == Pointer.Zero || length == 0) return -ErrorCode.EINVAL;

    File file = null;
    GenericINode inode = null;
    // Non-anonymous mappings need a valid open file.
    if ((flags & MAP_ANONYMOUS) == 0)
    {
        file = proc.LookupFile(fd);
        if (file == null) return -ErrorCode.EBADF;
        inode = file.inode;
    }

    int memorySize = Arch.ArchDefinition.PageAlign(length);
    // fix for code contract
    if (length > memorySize) length = memorySize;
    if ((file != null && length == 0) || length < 0) return -ErrorCode.EINVAL;
    // Anonymous mappings carry no file contents.
    if (file == null)
    {
        pgoffset = 0;
        length = 0;
    }

    //
    // Be careful for shared mapping -- which could be a shared memory region coming from Linux.
    // In this case we'll need to (1) call mmap() in the shadow process to obtain a valid mapping
    // (2) when a page fault happens, grab the physical page from Linux.
    //
    if ((flags & MAP_SHARED) != 0 && SharedWithLinux(inode))
    {
        // Don't know how to deal with a caller-specified address here.
        if (addr != UserPtr.Zero) return -ErrorCode.EINVAL;
        var vaddr = Arch.IPCStubs.linux_sys_alien_mmap2(current.Parent.helperPid, addr.Value, length, prot, flags, inode.LinuxFd, pgoffset);
        if (vaddr > AddressSpace.KERNEL_OFFSET) return -ErrorCode.EINVAL;
        switch (inode.kind)
        {
            case GenericINode.INodeKind.BinderSharedINodeKind:
            case GenericINode.INodeKind.AshmemINodeKind:
            case GenericINode.INodeKind.ScreenBufferINodeKind:
                // Remember where the shadow process mapped the region so the
                // pager can translate fault addresses later.
                inode.AlienSharedMemoryINode.vaddrInShadowProcess = new Pointer(vaddr);
                break;

            default:
                // UNIMPLEMENTED... let's return EINVAL to make sure we can catch it.
                return -ErrorCode.EINVAL;
        }
    }

    var r = space.AddMapping(ProtToAccessFlag(prot), flags, file, (uint)pgoffset * Arch.ArchDefinition.PageSize, length, targetAddr, memorySize);
    if (r < 0) return r;

    //
    // HACK for binder IPC
    //
    if (inode != null && inode.kind == GenericINode.INodeKind.BinderINodeKind)
    {
        proc.binderVMStart = new UserPtr(targetAddr);
        proc.binderVMSize = length;
    }
    return targetAddr.ToInt32();
}
// Wrap a raw Pointer as a user-space pointer value.
public UserPtr(Pointer v)
{
    _value = v;
}
// Drop the working-set entries covering the user range [vaddr, vaddr + size).
// Must not change the program break.
private void RemoveWorkingSet(Pointer vaddr, int size)
{
    Contract.Ensures(Brk == Contract.OldValue(Brk));
    workingSet.Remove(this, new UserPtr(vaddr), new UserPtr(vaddr) + size);
}
// Copy `length` bytes from the kernel buffer at `dst` into the user memory
// this UserPtr points at (_value), page by page, faulting pages in as needed.
// Returns the number of bytes NOT copied (0 on complete success).
// NOTE(review): the local names are inverted w.r.t. the data flow — data is
// copied FROM the kernel buffer `dst` INTO user memory at `src` (= _value).
private int Write(Process process, Pointer dst, int length)
{
    var bytesLeft = length;
    var src = _value;
    var dst_buf = new ByteBufferRef(dst.ToIntPtr(), length);
    var cursor = 0;
    while (bytesLeft > 0)
    {
        var region = process.Space.Find(src);
        // Invalid mapping
        if (region == null || region.IsFixed)
        {
            return bytesLeft;
        }

        var off = Arch.ArchDefinition.PageOffset(src.ToUInt32());
        var virtualAddr = process.Space.UserToVirt(new UserPtr(src));
        if (virtualAddr == Pointer.Zero)
        {
            // Page isn't present, try to bring it in.
            uint permission;
            Pager.HandlePageFault(process, MemoryRegion.FAULT_MASK, src, Pointer.Zero, out virtualAddr, out permission);
            if (virtualAddr == Pointer.Zero) break;
        }

        // Copy up to the end of the current page.
        var virtual_page = Arch.ArchDefinition.PageIndex(virtualAddr.ToUInt32());
        var page_buf = new ByteBufferRef(new IntPtr(virtual_page), Arch.ArchDefinition.PageSize);
        var b = Arch.ArchDefinition.PageSize - off;
        var bytesTobeCopied = b > bytesLeft ? bytesLeft : b;
        var dst_buf_page = page_buf.Slice(off, bytesTobeCopied);
        for (var i = 0; i < bytesTobeCopied; ++i)
        {
            dst_buf_page.Set(i, dst_buf.Get(cursor + i));
        }
        bytesLeft -= bytesTobeCopied;
        src += bytesTobeCopied;
        cursor += bytesTobeCopied;
    }
    return bytesLeft;
}
// First phase of RemoveMapping: locate the leftmost region overlapping
// [vaddr, vaddr + size) and trim any part of it that precedes `vaddr`.
// Outputs: ret = -EINVAL (fixed region / Head overlap), 0 (no overlap,
// done), or 1 (continue with RemoveMappingCenter from `prev`);
// `changed` reports whether the region list was modified.
private void RemoveMappingLeft(Pointer vaddr, int size, out int ret, out bool changed, out MemoryRegion prev)
{
    Contract.Ensures(Brk == Contract.OldValue(Brk));
    prev = Head;
    var r = prev.Next;
    var end = vaddr + size;
    var oldBrk = Brk;

    // The sentinel/Head region may never be removed.
    if (Head.OverlappedInt(vaddr, size))
    {
        ret = -ErrorCode.EINVAL;
        changed = false;
        return;
    }

    // Skip every region that ends at or before the range start.
    while (r != null && r.End <= vaddr)
    {
        prev = r;
        r = r.Next;
    }

    // No overlaps
    if (r == null || r.StartAddress >= end)
    {
        ret = 0;
        changed = false;
        return;
    }

    if (r.IsFixed)
    {
        ret = -ErrorCode.EINVAL;
        changed = false;
        return;
    }

    if (r.StartAddress < vaddr)
    {
        // The region straddles the range start: keep the left part.
        if (end < r.End)
        {
            // Range falls wholly inside this region: split out the middle.
            // NOTE(review): `middleRegion` is unused; the split's side effect
            // on the list is what matters — confirm this is intended.
            var offset = vaddr - r.StartAddress;
            var middleRegion = Split(r, offset);
        }
        else
        {
            // Trim everything from `vaddr` to the region's end.
            r.CutRight(r.End - vaddr);
        }
        prev = r;
        r = r.Next;
        ret = 1;
        changed = true;
        Contract.Assert(Brk == oldBrk);
        return;
    }

    ret = 1;
    changed = false;
    return;
}
// Implementation of the mmap2 system call. Chooses (or validates) a target
// address, resolves the backing file for non-anonymous mappings, forwards
// Linux-shared mappings to the shadow process, and installs the mapping.
// Returns the mapped address on success or a negative errno.
public static int mmap2(Thread current, UserPtr addr, int length, int prot, int flags, int fd, int pgoffset)
{
    Contract.Requires(current.Parent == current.Parent.Space.GhostOwner);
    Pointer targetAddr = new Pointer(Arch.ArchDefinition.PageAlign(addr.Value.ToUInt32()));
    //Arch.Console.Write("mmap2:");
    //Arch.Console.Write(addr.Value.ToInt32());
    //Arch.Console.Write(" sz=");
    //Arch.Console.Write(length);
    //Arch.Console.Write(" prot=");
    //Arch.Console.Write(prot);
    //Arch.Console.WriteLine();
    var proc = current.Parent;
    var space = proc.Space;
    Contract.Assert(proc == space.GhostOwner);

    // Without MAP_FIXED the hint is advisory: pick a free region if the
    // hint is zero or already occupied. With MAP_FIXED the address must
    // be page-aligned.
    if ((flags & MAP_FIXED) == 0)
    {
        if (targetAddr == Pointer.Zero || space.ContainRegion(targetAddr, length))
        {
            targetAddr = space.FindFreeRegion(length);
        }
    }
    else if (Arch.ArchDefinition.PageOffset(addr.Value.ToInt32()) != 0)
    {
        return(-ErrorCode.EINVAL);
    }

    targetAddr = new Pointer(Arch.ArchDefinition.PageIndex(targetAddr.ToUInt32()));
    if (targetAddr == Pointer.Zero || length == 0)
    {
        return(-ErrorCode.EINVAL);
    }

    File file = null;
    GenericINode inode = null;
    // Non-anonymous mappings need a valid open file.
    if ((flags & MAP_ANONYMOUS) == 0)
    {
        file = proc.LookupFile(fd);
        if (file == null)
        {
            return(-ErrorCode.EBADF);
        }
        inode = file.inode;
    }

    int memorySize = Arch.ArchDefinition.PageAlign(length);
    // fix for code contract
    if (length > memorySize)
    {
        length = memorySize;
    }
    if ((file != null && length == 0) || length < 0)
    {
        return(-ErrorCode.EINVAL);
    }
    // Anonymous mappings carry no file contents.
    if (file == null)
    {
        pgoffset = 0;
        length = 0;
    }

    //
    // Be careful for shared mapping -- which could be a shared memory region coming from Linux.
    // In this case we'll need to (1) call mmap() in the shadow process to obtain a valid mapping
    // (2) when a page fault happens, grab the physical page from Linux.
    //
    if ((flags & MAP_SHARED) != 0 && SharedWithLinux(inode))
    {
        // Don't know how to deal with a caller-specified address here.
        if (addr != UserPtr.Zero)
        {
            return(-ErrorCode.EINVAL);
        }
        var vaddr = Arch.IPCStubs.linux_sys_alien_mmap2(current.Parent.helperPid, addr.Value, length, prot, flags, inode.LinuxFd, pgoffset);
        if (vaddr > AddressSpace.KERNEL_OFFSET)
        {
            return(-ErrorCode.EINVAL);
        }
        switch (inode.kind)
        {
            case GenericINode.INodeKind.BinderSharedINodeKind:
            case GenericINode.INodeKind.AshmemINodeKind:
            case GenericINode.INodeKind.ScreenBufferINodeKind:
                // Remember where the shadow process mapped the region so the
                // pager can translate fault addresses later.
                inode.AlienSharedMemoryINode.vaddrInShadowProcess = new Pointer(vaddr);
                break;

            default:
                // UNIMPLEMENTED... let's return EINVAL to make sure we can catch it.
                return(-ErrorCode.EINVAL);
        }
    }

    var r = space.AddMapping(ProtToAccessFlag(prot), flags, file, (uint)pgoffset * Arch.ArchDefinition.PageSize, length, targetAddr, memorySize);
    if (r < 0)
    {
        return(r);
    }

    //
    // HACK for binder IPC
    //
    if (inode != null && inode.kind == GenericINode.INodeKind.BinderINodeKind)
    {
        proc.binderVMStart = new UserPtr(targetAddr);
        proc.binderVMSize = length;
    }
    return(targetAddr.ToInt32());
}
// Unmap the page-aligned range [vaddr, vaddr + size) in three phases:
// trim a region straddling the start (RemoveMappingLeft), unlink regions
// fully inside the range (RemoveMappingCenter), then trim the region
// straddling the end. Working-set entries for the range are dropped
// whenever the region list changed. Returns 0 or a negative errno.
internal int RemoveMapping(Pointer vaddr, int size)
{
    Contract.Requires(size > 0);
    Contract.Ensures(Brk == Contract.OldValue(Brk));

    // Both the size and the start address must be page-aligned.
    if (Arch.ArchDefinition.PageOffset(size) != 0 || Arch.ArchDefinition.PageOffset(vaddr.ToUInt32()) != 0) return -ErrorCode.EINVAL;

    MemoryRegion prev;
    var end = vaddr + size;
    var c = false;
    var changed = false;
    int ret;

    // Phase 1: handle a region overlapping the start of the range.
    RemoveMappingLeft(vaddr, size, out ret, out c, out prev);
    changed |= c;
    if (ret <= 0)
    {
        if (changed)
        {
            RemoveWorkingSet(vaddr, size);
        }
        return ret;
    }

    // Phase 2: unlink regions fully contained in the range.
    RemoveMappingCenter(vaddr, size, ref prev, out ret, out c);
    changed |= c;
    if (ret <= 0)
    {
        if (changed)
        {
            RemoveWorkingSet(vaddr, size);
        }
        return ret;
    }

    // Phase 3: a region straddles `end`; cut off its left part.
    var r = prev.Next;
    changed = true;
    var s = end - r.StartAddress;
    r.CutLeft(s);
    // NOTE(review): these two assignments are dead (nothing reads prev/r
    // afterwards); kept for symmetry with the phases above.
    prev = r;
    r = r.Next;
    RemoveWorkingSet(vaddr, size);
    return 0;
}
// Start the thread at instruction pointer `ip` with stack pointer `sp`,
// delegating to the architecture-specific implementation.
public void Start(Pointer ip, Pointer sp)
{
    impl.Start(ip, sp);
}
// Construct a user-space pointer from a raw 32-bit address.
public UserPtr(uint v)
{
    _value = new Pointer(v);
}
// Return whether `page` lies within the managed range [Start, End).
public bool Contains(Pointer page)
{
    return Start <= page && page < End;
}
// Construct a user-space pointer from a signed 32-bit address
// (reinterpreted as unsigned).
public UserPtr(int v)
{
    _value = new Pointer((uint)v);
}
// Return a single page to the native seL4 allocator.
public void FreePage(Pointer page)
{
    NativeMethods.sel4_alloc_free(handle, page, Arch.ArchDefinition.PageSize);
}
// Map every PT_LOAD segment of the ELF image described by `eh` into the
// process's address space, extending the mapping down to a page boundary
// and updating the program break past the highest segment.
// Returns 0 on success or a negative errno.
private static int MapInSegments(File file, Process proc, ELF32Header eh)
{
    Contract.Requires(file != null && file.GhostOwner == proc);
    Contract.Requires(proc.Space.GhostOwner == proc);

    var buf = new byte[ELF32ProgramHeader.Size];
    var ph = new ELF32ProgramHeader();
    // At this point we need to map in all stuff in PT_LOAD
    for (var i = 0; i < eh.NumOfProgramHeader; ++i)
    {
        // Read the i-th program header from the file.
        var pos = (uint)(eh.ProgramHeaderOffest + eh.ProgramHeaderSize * i);
        if (file.Read(buf, ref pos) != buf.Length) return -ErrorCode.EINVAL;
        ph = ELF32ProgramHeader.Read(buf);
        // NOTE(review): `size` is unused; the per-field values below are
        // what actually drive the mapping.
        var size = ph.FileSize > ph.MemorySize ? ph.FileSize : ph.MemorySize;
        if (ph.type != ELF32ProgramHeader.PT_LOAD) continue;

        // Round address to page boundary
        var diff = Arch.ArchDefinition.PageOffset(ph.vaddr);
        var vaddr = new Pointer(ph.vaddr);
        var offset = ph.offset;
        var memSize = (int)Arch.ArchDefinition.PageAlign((uint)ph.MemorySize);
        var fileSize = ph.FileSize;

        // Validate segment bounds against the file.
        // NOTE(review): `diff < 0` looks unreachable if PageOffset returns a
        // non-negative page offset — confirm against ArchDefinition.
        if (diff < 0 || ph.offset < diff || fileSize + diff > file.inode.Size || fileSize <= 0 || memSize <= 0) return -ErrorCode.EINVAL;

        // Extend the mapping leftwards so it starts on a page boundary.
        vaddr -= diff;
        offset -= (uint)diff;
        fileSize += diff;
        if (fileSize > memSize) fileSize = memSize;
        if (proc.Space.AddMapping(ph.ExpressOSAccessFlag, 0, file, offset, fileSize, vaddr, memSize) != 0) return -ErrorCode.ENOMEM;

        // Update brk
        var segmentEnd = (vaddr + memSize).ToUInt32();
        if (segmentEnd > proc.Space.Brk)
        {
            proc.Space.InitializeBrk(segmentEnd);
        }
    }
    return 0;
}
// Return `pages` consecutive pages starting at `start` to the native
// seL4 allocator.
public void FreePages(Pointer start, int pages)
{
    NativeMethods.sel4_alloc_free(handle, start, pages * Arch.ArchDefinition.PageSize);
}