/*
 * Create a new memory region in the address space.
 *
 * Any portion of the current address space that overlaps the requested
 * range is unmapped first, then the new region is inserted -- this matches
 * mmap(2) semantics, which several clients (including the dynamic linker)
 * rely on.
 *
 * Both vaddr and memorySize must be page-aligned; unaligned arguments are
 * rejected with -EINVAL.
 */
internal int AddMapping(uint access, int flags, File file, uint fileOffset, int fileSize, Pointer vaddr, int memorySize)
{
    Contract.Requires(file == null || file.GhostOwner == GhostOwner);
    Contract.Requires(0 <= fileSize && fileSize <= memorySize);
    Contract.Requires(file == null || fileSize > 0);
    Contract.Requires(file != null || (fileSize == 0 && fileOffset == 0));
    Contract.Ensures(Brk == Contract.OldValue(Brk));

    // Reject a non-positive or non-page-aligned size.
    if (memorySize <= 0 || Arch.ArchDefinition.PageOffset(memorySize) != 0)
        return -ErrorCode.EINVAL;

    // Reject a non-page-aligned base address.
    if (Arch.ArchDefinition.PageOffset(vaddr.ToUInt32()) != 0)
        return -ErrorCode.EINVAL;

    // Tear down any mappings overlapping the requested range before
    // inserting the replacement region.
    var unmapResult = RemoveMapping(vaddr, memorySize);
    if (unmapResult != 0)
        return unmapResult;

    Insert(new MemoryRegion(GhostOwner, access, flags, file, fileOffset, fileSize, vaddr, memorySize, false));
    return 0;
}
/*
 * Create a new memory region in the address space.
 *
 * If there are any overlaps between the current address space and the requested one,
 * this function will unmap the overlapped portions of the address space before
 * mapping in the new memory region.
 *
 * Several clients, including the dynamic linker, rely on this feature. See mmap(2)
 * for details.
 *
 * This function requires vaddr and memorySize to be aligned to the page boundary;
 * unaligned arguments are rejected with -EINVAL.
 */
internal int AddMapping(uint access, int flags, File file, uint fileOffset, int fileSize, Pointer vaddr, int memorySize)
{
    Contract.Requires(file == null || file.GhostOwner == GhostOwner);
    Contract.Requires(0 <= fileSize && fileSize <= memorySize);
    Contract.Requires(file == null || fileSize > 0);
    Contract.Requires(file != null || (fileSize == 0 && fileOffset == 0));
    Contract.Ensures(Brk == Contract.OldValue(Brk));

    // The mapping size must be positive and page-aligned.
    if (memorySize <= 0 || Arch.ArchDefinition.PageOffset(memorySize) != 0)
        return -ErrorCode.EINVAL;

    // The base address must be page-aligned as well.
    var diff = Arch.ArchDefinition.PageOffset(vaddr.ToUInt32());
    if (diff != 0)
        return -ErrorCode.EINVAL;

    // Unmap any overlapping portion of the address space first.
    var r = RemoveMapping(vaddr, memorySize);
    if (r != 0) {
        return r;
    }

    var newRegion = new MemoryRegion(GhostOwner, access, flags, file, fileOffset, fileSize, vaddr, memorySize, false);
    Insert(newRegion);
    return 0;
}
/*
 * Remove all mappings in the range [vaddr, vaddr + size).
 *
 * Both vaddr and size must be page-aligned; unaligned arguments are
 * rejected with -EINVAL. On success, any working-set pages in the range
 * are also removed.
 *
 * The work is delegated to RemoveMappingLeft / RemoveMappingCenter for the
 * regions straddling the left boundary and lying inside the range; a
 * positive 'ret' from both means one region still straddles the right
 * boundary, which is trimmed here with CutLeft.
 *
 * Fix vs. original: the trailing dead stores ("changed = true;
 * prev = r; r = r.Next;") have been removed -- none of those locals was
 * read afterwards. Behavior is unchanged.
 */
internal int RemoveMapping(Pointer vaddr, int size)
{
    Contract.Requires(size > 0);
    Contract.Ensures(Brk == Contract.OldValue(Brk));

    if (Arch.ArchDefinition.PageOffset(size) != 0 || Arch.ArchDefinition.PageOffset(vaddr.ToUInt32()) != 0)
        return -ErrorCode.EINVAL;

    MemoryRegion prev;
    var end = vaddr + size;
    var c = false;
    var changed = false;
    int ret;

    // Handle a region straddling the left boundary of the range.
    RemoveMappingLeft(vaddr, size, out ret, out c, out prev);
    changed |= c;
    if (ret <= 0)
    {
        // Done (ret == 0) or error (ret < 0); flush working-set pages if
        // anything was actually modified.
        if (changed)
        {
            RemoveWorkingSet(vaddr, size);
        }
        return ret;
    }

    // Remove regions lying entirely inside the range.
    RemoveMappingCenter(vaddr, size, ref prev, out ret, out c);
    changed |= c;
    if (ret <= 0)
    {
        if (changed)
        {
            RemoveWorkingSet(vaddr, size);
        }
        return ret;
    }

    // A region still straddles the right boundary: cut off the part that
    // lies inside [vaddr, end).
    var r = prev.Next;
    r.CutLeft(end - r.StartAddress);
    RemoveWorkingSet(vaddr, size);
    return 0;
}
/*
 * Update the access rights of every region overlapping [start, start + size).
 *
 * Regions partially covered by the range are split so that only the covered
 * part changes rights; adjacent regions with equal rights are re-merged via
 * TryMergeWithNext. Returns false if 'start' is not page-aligned or if any
 * overlapped region is fixed (IsFixed); true otherwise.
 *
 * NOTE(review): 'size' is not checked for page alignment here, unlike
 * RemoveMapping -- presumably the callers guarantee it; confirm.
 */
internal bool UpdateAccessRightRange(Pointer start, int size, uint access)
{
    if (Arch.ArchDefinition.PageOffset(start.ToUInt32()) != 0)
        return false;

    var prev = Head;
    var r = prev.Next;
    var end = start + size;

    while (r != null && r.StartAddress < end)
    {
        // Skip regions outside the range, or whose rights already match.
        if (!r.OverlappedInt(start, size) || access == r.Access)
        {
            prev = r;
            r = r.Next;
        }
        else
        {
            // Fixed regions cannot have their rights changed.
            if (r.IsFixed)
            {
                return false;
            }
            if (r.StartAddress < start)
            {
                // Region straddles the left boundary: split off the
                // untouched prefix; 'middleRegion' starts at 'start'.
                var region_end = r.End;
                var middleRegion = Split(r, start - r.StartAddress);
                if (end < region_end)
                {
                    // Range ends inside this region: carve out the middle
                    // part, update it, and finish.
                    prev = Split(middleRegion, size);
                    UpdateAccessRights(middleRegion, access);
                    return true;
                }
                else
                {
                    // Tail of the region is fully covered: update it and
                    // try to coalesce with the following region.
                    UpdateAccessRights(middleRegion, access);
                    TryMergeWithNext(middleRegion);
                    prev = middleRegion;
                    r = prev.Next;
                }
            }
            else
            {
                if (r.End <= end)
                {
                    // Region fully covered: update and coalesce with both
                    // neighbors.
                    UpdateAccessRights(r, access);
                    TryMergeWithNext(r);
                    var merged = TryMergeWithNext(prev);
                    if (merged)
                    {
                        // 'prev' absorbed 'r'; continue from its new successor.
                        r = prev.Next;
                    }
                    else
                    {
                        prev = prev.Next;
                        r = prev.Next;
                    }
                }
                else
                {
                    // Region straddles the right boundary: split at 'end'
                    // and update only the left piece.
                    var right_region = Split(r, end - r.StartAddress);
                    UpdateAccessRights(r, access);
                    // NOTE(review): the 'merged' result is unused here.
                    var merged = TryMergeWithNext(prev);
                    prev = right_region;
                    r = prev.Next;
                }
            }
        }
    }
    return true;
}
/*
 * mmap2(2) system call: map a file or anonymous memory into the calling
 * process's address space.
 *
 * Returns the (page-aligned) mapped address on success, or a negated
 * errno (-EINVAL, -EBADF) on failure. 'pgoffset' is the file offset in
 * pages, per the mmap2 ABI.
 */
public static int mmap2(Thread current, UserPtr addr, int length, int prot, int flags, int fd, int pgoffset)
{
    Contract.Requires(current.Parent == current.Parent.Space.GhostOwner);

    Pointer targetAddr = new Pointer(Arch.ArchDefinition.PageAlign(addr.Value.ToUInt32()));

    var proc = current.Parent;
    var space = proc.Space;
    Contract.Assert(proc == space.GhostOwner);

    if ((flags & MAP_FIXED) == 0)
    {
        // Non-fixed mapping: the hint is only advisory. If no hint was
        // given, or the hinted range is already occupied, pick a free one.
        if (targetAddr == Pointer.Zero || space.ContainRegion(targetAddr, length))
            targetAddr = space.FindFreeRegion(length);
    }
    else if (Arch.ArchDefinition.PageOffset(addr.Value.ToInt32()) != 0)
    {
        // MAP_FIXED requires a page-aligned address.
        return -ErrorCode.EINVAL;
    }

    // Normalize the chosen address to a page boundary.
    // NOTE(review): PageIndex presumably rounds down to the page base -- confirm.
    targetAddr = new Pointer(Arch.ArchDefinition.PageIndex(targetAddr.ToUInt32()));
    if (targetAddr == Pointer.Zero || length == 0)
        return -ErrorCode.EINVAL;

    // Resolve the backing file unless this is an anonymous mapping.
    File file = null;
    GenericINode inode = null;
    if ((flags & MAP_ANONYMOUS) == 0)
    {
        file = proc.LookupFile(fd);
        if (file == null)
            return -ErrorCode.EBADF;
        inode = file.inode;
    }

    int memorySize = Arch.ArchDefinition.PageAlign(length);
    // fix for code contract
    if (length > memorySize)
        length = memorySize;
    if ((file != null && length == 0) || length < 0)
        return -ErrorCode.EINVAL;
    // Anonymous mappings carry no file contents; zero the file parameters
    // to satisfy AddMapping's contract.
    if (file == null)
    {
        pgoffset = 0;
        length = 0;
    }

    //
    // Be careful for shared mapping -- which could be a shared memory region coming from Linux.
    // In this case we'll need to (1) call mmap() in the shadow process to obtain a valid mapping
    // (2) when a page fault happens, grabs the physical page from linux.
    //
    if ((flags & MAP_SHARED) != 0 && SharedWithLinux(inode))
    {
        // Shared mappings at a caller-chosen address are unsupported.
        if (addr != UserPtr.Zero)
            return -ErrorCode.EINVAL;

        // Mirror the mapping in the Linux shadow process; page faults will
        // later pull physical pages from there.
        var vaddr = Arch.IPCStubs.linux_sys_alien_mmap2(current.Parent.helperPid, addr.Value, length, prot, flags, inode.LinuxFd, pgoffset);
        if (vaddr > AddressSpace.KERNEL_OFFSET)
            return -ErrorCode.EINVAL;
        switch (inode.kind)
        {
            case GenericINode.INodeKind.BinderSharedINodeKind:
            case GenericINode.INodeKind.AshmemINodeKind:
            case GenericINode.INodeKind.ScreenBufferINodeKind:
                // Remember where the region lives in the shadow process.
                inode.AlienSharedMemoryINode.vaddrInShadowProcess = new Pointer(vaddr);
                break;
            default:
                // UNIMPLEMENTED... let's return EINVAL to make sure we can catch it.
                return -ErrorCode.EINVAL;
        }
    }

    var r = space.AddMapping(ProtToAccessFlag(prot), flags, file, (uint)pgoffset * Arch.ArchDefinition.PageSize, length, targetAddr, memorySize);
    if (r < 0)
        return r;

    //
    // HACK for binder IPC: record the binder VM window for the process.
    //
    if (inode != null && inode.kind == GenericINode.INodeKind.BinderINodeKind)
    {
        proc.binderVMStart = new UserPtr(targetAddr);
        proc.binderVMSize = length;
    }

    return targetAddr.ToInt32();
}
/*
 * Handle a user-space page fault for 'process' at 'faultAddress'.
 *
 * Outputs the physical page to map and the permission bits to install.
 * On failure (page 0, no region, or access mismatch) outputs Pointer.Zero
 * and MemoryRegion.FALUT_NONE.
 *
 * NOTE(review): 'FALUT_NONE' looks like a typo for 'FAULT_NONE' in the
 * declaring type; it cannot be renamed from this file alone.
 */
public static void HandlePageFault(Process process, uint faultType, Pointer faultAddress, Pointer faultIP, out Pointer physicalPage, out uint permission)
{
    // Object invariants of Process
    Contract.Assume(process.Space.GhostOwner == process);
    Contract.Assume(process.Space.Head.GhostOwner == process);

    // Never map page 0 (keeps null-pointer dereferences faulting).
    if (faultAddress.ToUInt32() < Arch.ArchDefinition.PageSize)
    {
        physicalPage = Pointer.Zero;
        permission = MemoryRegion.FALUT_NONE;
        return;
    }

    SyscallProfiler.EnterPageFault();
    var space = process.Space;
    var region = space.Find(faultAddress);
    // The fault is only serviceable if a region covers the address and the
    // fault type is permitted by the region's access bits.
    if (region != null && (faultType & region.Access) != 0)
    {
        /*
         * Check whether the kernel has allocated a page for this address,
         * which might be the case due to UserPtr.Read() / UserPtr.Write().
         */
        var mapped_in_page = space.UserToVirt(new UserPtr(faultAddress));
        if (mapped_in_page != Pointer.Zero)
        {
            physicalPage = PageIndex(mapped_in_page);
            permission = region.Access & MemoryRegion.FAULT_MASK;
            return;
        }

        // If the page is a shared page from Linux, we'll need to ask linux
        // to grab the page. Otherwise let's just allocate a fresh one.
        var shared_memory_region = IsAlienSharedRegion(region);
        var ghost_page_from_fresh_memory = false;
        ByteBufferRef buf;
        if (shared_memory_region)
        {
            // Pull the physical page from the Linux shadow process.
            buf = Globals.LinuxMemoryAllocator.GetUserPage(process, faultType, ToShadowProcessAddress(faultAddress, region));
            if (!buf.isValid)
            {
                Arch.Console.WriteLine("pager: cannot map in alien page.");
                space.DumpAll();
                physicalPage = Pointer.Zero;
                permission = MemoryRegion.FALUT_NONE;
                return;
            }
        }
        else
        {
            buf = Globals.PageAllocator.AllocPage();
            ghost_page_from_fresh_memory = true;
            if (!buf.isValid)
            {
                // Out of physical pages: unrecoverable.
                Arch.Console.WriteLine("Cannot allocate new pages");
                Utils.Panic();
            }
            if (region.BackingFile != null)
            {
                // File-backed page: read up to one page of file contents,
                // clamped to the region's file size, and zero-fill the rest.
                var rel_pos = (PageIndex(faultAddress) - region.StartAddress);
                uint pos = (uint)((ulong)rel_pos + region.FileOffset);
                var readSizeLong = region.FileSize - rel_pos;
                if (readSizeLong < 0)
                {
                    readSizeLong = 0;
                }
                else if (readSizeLong > Arch.ArchDefinition.PageSize)
                {
                    readSizeLong = Arch.ArchDefinition.PageSize;
                }
                int readSize = (int)readSizeLong;
                Contract.Assert(region.BackingFile.GhostOwner == process);
                var r = region.BackingFile.Read(buf, 0, readSize, ref pos);
                // A failed read is treated as "read nothing": the page is
                // then fully zeroed below.
                if (r < 0)
                {
                    r = 0;
                }
                Utils.Assert(r <= Arch.ArchDefinition.PageSize);
                if (r < Arch.ArchDefinition.PageSize)
                {
                    buf.ClearAfter(r);
                }
            }
            else
            {
                // Anonymous page: zero-fill.
                buf.Clear();
            }
        }
        // Exactly one of the two allocation paths must have been taken.
        Contract.Assert(shared_memory_region ^ ghost_page_from_fresh_memory);
        var page = new Pointer(buf.Location);
        space.AddIntoWorkingSet(new UserPtr(PageIndex(faultAddress)), page);
        SyscallProfiler.ExitPageFault();
        physicalPage = page;
        permission = region.Access & MemoryRegion.FAULT_MASK;
        return;
    }
    else
    {
        /*
         * TODO: mmap2 enables the application requests an automatically expandable
         * region (e.g., a stack)
         *
         * The feature doesn't seem to be actively used by the applications, since
         * both the C runtime and the pthread library initializes stack explicitly.
         *
         * The feature is currently unimplemented.
         */
    }
    physicalPage = Pointer.Zero;
    permission = MemoryRegion.FALUT_NONE;
    return;
}
/*
 * Remove all mappings in the range [vaddr, vaddr + size).
 *
 * Both vaddr and size must be page-aligned; unaligned arguments are
 * rejected with -EINVAL. On success, working-set pages in the range are
 * removed as well.
 */
internal int RemoveMapping(Pointer vaddr, int size)
{
    Contract.Requires(size > 0);
    Contract.Ensures(Brk == Contract.OldValue(Brk));

    if (Arch.ArchDefinition.PageOffset(size) != 0 || Arch.ArchDefinition.PageOffset(vaddr.ToUInt32()) != 0) {
        return(-ErrorCode.EINVAL);
    }

    MemoryRegion prev;
    var end = vaddr + size;
    var c = false;
    var changed = false;
    int ret;

    // Handle a region straddling the left boundary of the range.
    RemoveMappingLeft(vaddr, size, out ret, out c, out prev);
    changed |= c;
    if (ret <= 0) {
        // Done or error; flush working-set pages only if something changed.
        if (changed) {
            RemoveWorkingSet(vaddr, size);
        }
        return(ret);
    }

    // Remove regions lying entirely inside the range.
    RemoveMappingCenter(vaddr, size, ref prev, out ret, out c);
    changed |= c;
    if (ret <= 0) {
        if (changed) {
            RemoveWorkingSet(vaddr, size);
        }
        return(ret);
    }

    // A region still straddles the right boundary: trim its covered prefix.
    var r = prev.Next;
    changed = true;
    var s = end - r.StartAddress;
    r.CutLeft(s);
    // NOTE(review): the following two assignments (and 'changed' above) are
    // dead stores -- the locals are not read again before return.
    prev = r;
    r = r.Next;
    RemoveWorkingSet(vaddr, size);
    return(0);
}
/*
 * Update the access rights of every region overlapping [start, start + size).
 *
 * Regions partially covered by the range are split so that only the covered
 * part changes rights; adjacent regions are re-merged via TryMergeWithNext
 * where possible. Returns false if 'start' is not page-aligned or if any
 * overlapped region is fixed (IsFixed); true otherwise.
 *
 * Fix vs. original: two dead captures were removed -- the 'prev = Split(...)'
 * assignment immediately before 'return true', and the unused
 * 'var merged = TryMergeWithNext(prev);' in the right-straddle branch.
 * Both calls are kept for their side effects; behavior is unchanged.
 */
internal bool UpdateAccessRightRange(Pointer start, int size, uint access)
{
    // The start address must be page-aligned.
    if (Arch.ArchDefinition.PageOffset(start.ToUInt32()) != 0) {
        return false;
    }

    var prev = Head;
    var r = prev.Next;
    var end = start + size;

    while (r != null && r.StartAddress < end) {
        // Skip regions outside the range, or whose rights already match.
        if (!r.OverlappedInt(start, size) || access == r.Access) {
            prev = r;
            r = r.Next;
            continue;
        }

        // Fixed regions cannot have their rights changed.
        if (r.IsFixed) {
            return false;
        }

        if (r.StartAddress < start) {
            // Region straddles the left boundary: split off the untouched
            // prefix; 'middleRegion' starts at 'start'.
            var region_end = r.End;
            var middleRegion = Split(r, start - r.StartAddress);
            if (end < region_end) {
                // Range ends inside this region: carve out the middle part,
                // update it, and finish.
                Split(middleRegion, size);
                UpdateAccessRights(middleRegion, access);
                return true;
            }
            // Tail of the region is fully covered: update it and try to
            // coalesce with the following region.
            UpdateAccessRights(middleRegion, access);
            TryMergeWithNext(middleRegion);
            prev = middleRegion;
            r = prev.Next;
        } else if (r.End <= end) {
            // Region fully covered: update and coalesce with both neighbors.
            UpdateAccessRights(r, access);
            TryMergeWithNext(r);
            var merged = TryMergeWithNext(prev);
            if (merged) {
                // 'prev' absorbed 'r'; continue from its new successor.
                r = prev.Next;
            } else {
                prev = prev.Next;
                r = prev.Next;
            }
        } else {
            // Region straddles the right boundary: split at 'end' and
            // update only the left piece, then try to merge it backwards.
            var right_region = Split(r, end - r.StartAddress);
            UpdateAccessRights(r, access);
            TryMergeWithNext(prev);
            prev = right_region;
            r = prev.Next;
        }
    }
    return true;
}
/*
 * Handle a user-space page fault for 'process' at 'faultAddress'.
 *
 * Outputs the physical page to map and the permission bits to install; on
 * failure (page 0, no covering region, or access mismatch) outputs
 * Pointer.Zero and MemoryRegion.FALUT_NONE.
 *
 * NOTE(review): 'FALUT_NONE' appears to be a typo for 'FAULT_NONE' in the
 * declaring type; it cannot be renamed from this file alone.
 */
public static void HandlePageFault(Process process, uint faultType, Pointer faultAddress, Pointer faultIP, out Pointer physicalPage, out uint permission)
{
    // Object invariants of Process
    Contract.Assume(process.Space.GhostOwner == process);
    Contract.Assume(process.Space.Head.GhostOwner == process);

    // Never map page 0 (keeps null-pointer dereferences faulting).
    if (faultAddress.ToUInt32() < Arch.ArchDefinition.PageSize)
    {
        physicalPage = Pointer.Zero;
        permission = MemoryRegion.FALUT_NONE;
        return;
    }

    SyscallProfiler.EnterPageFault();
    var space = process.Space;
    var region = space.Find(faultAddress);
    // Serviceable only if a region covers the address and the fault type is
    // permitted by the region's access bits.
    if (region != null && (faultType & region.Access) != 0)
    {
        /*
         * Check whether the kernel has allocated a page for this address,
         * which might be the case due to UserPtr.Read() / UserPtr.Write().
         */
        var mapped_in_page = space.UserToVirt(new UserPtr(faultAddress));
        if (mapped_in_page != Pointer.Zero)
        {
            physicalPage = PageIndex(mapped_in_page);
            permission = region.Access & MemoryRegion.FAULT_MASK;
            return;
        }

        // If the page is a shared page from Linux, we'll need to ask linux
        // to grab the page. Otherwise let's just allocate a fresh one.
        var shared_memory_region = IsAlienSharedRegion(region);
        var ghost_page_from_fresh_memory = false;
        ByteBufferRef buf;
        if (shared_memory_region)
        {
            // Pull the physical page from the Linux shadow process.
            buf = Globals.LinuxMemoryAllocator.GetUserPage(process, faultType, ToShadowProcessAddress(faultAddress, region));
            if (!buf.isValid)
            {
                Arch.Console.WriteLine("pager: cannot map in alien page.");
                space.DumpAll();
                physicalPage = Pointer.Zero;
                permission = MemoryRegion.FALUT_NONE;
                return;
            }
        }
        else
        {
            buf = Globals.PageAllocator.AllocPage();
            ghost_page_from_fresh_memory = true;
            if (!buf.isValid)
            {
                // Out of physical pages: unrecoverable.
                Arch.Console.WriteLine("Cannot allocate new pages");
                Utils.Panic();
            }
            if (region.BackingFile != null)
            {
                // File-backed page: read up to one page of file contents,
                // clamped to the region's file size; zero-fill the rest.
                var rel_pos = (PageIndex(faultAddress) - region.StartAddress);
                uint pos = (uint)((ulong)rel_pos + region.FileOffset);
                var readSizeLong = region.FileSize - rel_pos;
                if (readSizeLong < 0)
                    readSizeLong = 0;
                else if (readSizeLong > Arch.ArchDefinition.PageSize)
                    readSizeLong = Arch.ArchDefinition.PageSize;
                int readSize = (int)readSizeLong;
                Contract.Assert(region.BackingFile.GhostOwner == process);
                var r = region.BackingFile.Read(buf, 0, readSize, ref pos);
                // A failed read is treated as "read nothing" -> full zero page.
                if (r < 0)
                    r = 0;
                Utils.Assert(r <= Arch.ArchDefinition.PageSize);
                if (r < Arch.ArchDefinition.PageSize)
                    buf.ClearAfter(r);
            }
            else
            {
                // Anonymous page: zero-fill.
                buf.Clear();
            }
        }
        // Exactly one of the two allocation paths must have been taken.
        Contract.Assert(shared_memory_region ^ ghost_page_from_fresh_memory);
        var page = new Pointer(buf.Location);
        space.AddIntoWorkingSet(new UserPtr(PageIndex(faultAddress)), page);
        SyscallProfiler.ExitPageFault();
        physicalPage = page;
        permission = region.Access & MemoryRegion.FAULT_MASK;
        return;
    }
    else
    {
        /*
         * TODO: mmap2 enables the application requests an automatically expandable
         * region (e.g., a stack)
         *
         * The feature doesn't seem to be actively used by the applications, since
         * both the C runtime and the pthread library initializes stack explicitly.
         *
         * The feature is currently unimplemented.
         */
    }
    physicalPage = Pointer.Zero;
    permission = MemoryRegion.FALUT_NONE;
    return;
}
/*
 * mmap2(2) system call: map a file or anonymous memory into the calling
 * process's address space.
 *
 * Returns the (page-aligned) mapped address on success, or a negated
 * errno (-EINVAL, -EBADF) on failure. 'pgoffset' is the file offset
 * expressed in pages, per the mmap2 ABI.
 */
public static int mmap2(Thread current, UserPtr addr, int length, int prot, int flags, int fd, int pgoffset)
{
    Contract.Requires(current.Parent == current.Parent.Space.GhostOwner);

    Pointer targetAddr = new Pointer(Arch.ArchDefinition.PageAlign(addr.Value.ToUInt32()));

    var proc = current.Parent;
    var space = proc.Space;
    Contract.Assert(proc == space.GhostOwner);

    if ((flags & MAP_FIXED) == 0) {
        // Non-fixed mapping: the hint is only advisory. If no hint was
        // given, or the hinted range is already occupied, pick a free one.
        if (targetAddr == Pointer.Zero || space.ContainRegion(targetAddr, length)) {
            targetAddr = space.FindFreeRegion(length);
        }
    } else if (Arch.ArchDefinition.PageOffset(addr.Value.ToInt32()) != 0) {
        // MAP_FIXED requires a page-aligned address.
        return(-ErrorCode.EINVAL);
    }

    // Normalize the chosen address to a page boundary.
    // NOTE(review): PageIndex presumably rounds down to the page base -- confirm.
    targetAddr = new Pointer(Arch.ArchDefinition.PageIndex(targetAddr.ToUInt32()));
    if (targetAddr == Pointer.Zero || length == 0) {
        return(-ErrorCode.EINVAL);
    }

    // Resolve the backing file unless this is an anonymous mapping.
    File file = null;
    GenericINode inode = null;
    if ((flags & MAP_ANONYMOUS) == 0) {
        file = proc.LookupFile(fd);
        if (file == null) {
            return(-ErrorCode.EBADF);
        }
        inode = file.inode;
    }

    int memorySize = Arch.ArchDefinition.PageAlign(length);
    // fix for code contract
    if (length > memorySize) {
        length = memorySize;
    }
    if ((file != null && length == 0) || length < 0) {
        return(-ErrorCode.EINVAL);
    }
    // Anonymous mappings carry no file contents; zero the file parameters
    // to satisfy AddMapping's contract.
    if (file == null) {
        pgoffset = 0;
        length = 0;
    }

    //
    // Be careful for shared mapping -- which could be a shared memory region coming from Linux.
    // In this case we'll need to (1) call mmap() in the shadow process to obtain a valid mapping
    // (2) when a page fault happens, grabs the physical page from linux.
    //
    if ((flags & MAP_SHARED) != 0 && SharedWithLinux(inode)) {
        // Shared mappings at a caller-chosen address are unsupported.
        if (addr != UserPtr.Zero) {
            return(-ErrorCode.EINVAL);
        }
        // Mirror the mapping in the Linux shadow process; page faults will
        // later pull physical pages from there.
        var vaddr = Arch.IPCStubs.linux_sys_alien_mmap2(current.Parent.helperPid, addr.Value, length, prot, flags, inode.LinuxFd, pgoffset);
        if (vaddr > AddressSpace.KERNEL_OFFSET) {
            return(-ErrorCode.EINVAL);
        }
        switch (inode.kind) {
            case GenericINode.INodeKind.BinderSharedINodeKind:
            case GenericINode.INodeKind.AshmemINodeKind:
            case GenericINode.INodeKind.ScreenBufferINodeKind:
                // Remember where the region lives in the shadow process.
                inode.AlienSharedMemoryINode.vaddrInShadowProcess = new Pointer(vaddr);
                break;
            default:
                // UNIMPLEMENTED... let's return EINVAL to make sure we can catch it.
                return(-ErrorCode.EINVAL);
        }
    }

    var r = space.AddMapping(ProtToAccessFlag(prot), flags, file, (uint)pgoffset * Arch.ArchDefinition.PageSize, length, targetAddr, memorySize);
    if (r < 0) {
        return(r);
    }

    //
    // HACK for binder IPC: record the binder VM window for the process.
    //
    if (inode != null && inode.kind == GenericINode.INodeKind.BinderINodeKind) {
        proc.binderVMStart = new UserPtr(targetAddr);
        proc.binderVMSize = length;
    }

    return(targetAddr.ToInt32());
}