//-------------------------------------------------
//  find_memshare - find memory share
//-------------------------------------------------
public override ListBytesPointer find_memshare(u8 width, out UInt32 bytes, bool required)
{
    bytes = 0;

    // locate the share by our tag; nothing to report when it doesn't exist
    memory_share found = base_().memshare(tag());
    if (found == null)
        return null;

    // a requested width of zero means "any"; otherwise the share must match exactly
    if (width != 0 && found.bitwidth() != width)
    {
        if (required)
            osd_printf_warning("Shared ptr '{0}' found but is width {1}, not {2} as requested\n", tag(), found.bitwidth(), width);

        return null;
    }

    // success - report the byte count and hand back the pointer
    bytes = (UInt32)found.bytes();
    return found.ptr();
}
// generic read/write handlers
//DECLARE_READ8_MEMBER(read8);
//DECLARE_READ8_MEMBER(read8_ext);
//DECLARE_WRITE8_MEMBER(write8);
//DECLARE_WRITE8_MEMBER(write8_ext);
//DECLARE_WRITE8_MEMBER(write_indirect);
//DECLARE_WRITE8_MEMBER(write_indirect_ext);
//DECLARE_READ16_MEMBER(read16);
//DECLARE_READ16_MEMBER(read16_ext);
//DECLARE_WRITE16_MEMBER(write16);
//DECLARE_WRITE16_MEMBER(write16_ext);
//DECLARE_READ32_MEMBER(read32);
//DECLARE_WRITE32_MEMBER(write32);

// helper to update palette when data changed
//void update() { if (!m_init.isnull()) m_init(*this); }

// device-level overrides

//-------------------------------------------------
//  device_start - start up the device
//
//  Locates the palette memory share (and optional
//  "_ext" split share), configures the palette RAM
//  accessors from it, applies any caller-supplied
//  width/endianness overrides, then runs the
//  optional init helper.
//-------------------------------------------------
protected override void device_start()
{
    // bind the init function
    //m_init.bind_relative_to(*owner());

    // find the memory, if present
    memory_share share = memshare(tag());
    if (share != null)
    {
        // find the extended (split) memory, if present
        string tag_ext = tag() + "_ext";
        memory_share share_ext = memshare(tag_ext);

        // make sure we have specified a format; this was a commented-out
        // assert_always - made a live check, matching the emu_fatalerror
        // style used by the other device_start in this file
        if (m_raw_to_rgb.bytes_per_entry() <= 0)
            throw new emu_fatalerror("palette_device({0}): Palette has memory share but no format specified", tag());

        // determine bytes per entry and configure; a split share holds
        // half of each entry in each of the two regions
        int bytes_per_entry = m_raw_to_rgb.bytes_per_entry();
        if (share_ext == null)
        {
            m_paletteram.set(share, bytes_per_entry);
        }
        else
        {
            m_paletteram.set(share, bytes_per_entry / 2);
            m_paletteram_ext.set(share_ext, bytes_per_entry / 2);
        }

        // override membits if provided
        if (m_membits_supplied)
        {
            // forcing width only makes sense when narrower than the native bus width
            if (m_membits >= share.bitwidth())
                throw new emu_fatalerror("palette_device({0}): Improper use of MCFG_PALETTE_MEMBITS", tag());

            m_paletteram.set_membits(m_membits);
            if (share_ext != null)
                m_paletteram_ext.set_membits(m_membits);
        }

        // override endianness if provided
        if (m_endianness_supplied)
        {
            // forcing endianness only makes sense when the RAM is narrower
            // than the palette format and not split
            if (share_ext != null || (m_paletteram.membits() / 8) >= bytes_per_entry)
                throw new emu_fatalerror("palette_device({0}): Improper use of MCFG_PALETTE_ENDIANNESS", tag());

            m_paletteram.set_endianness(m_endianness);
        }
    }

    // call the initialization helper if present
    if (m_init != null)
        m_init(this);
}
// device-level overrides

//-------------------------------------------------
//  device_start - look up the sprite RAM share on
//  our owner and register it with the renderer
//-------------------------------------------------
protected override void device_start()
{
    // find spriteram; nothing to configure when the owner has no share
    memory_share ram = owner().memshare(tag());
    if (ram == null)
        return;

    set_spriteram(ram.ptr(), (uint32_t)ram.bytes());
    //set_spriteram(reinterpret_cast<_SpriteRAMType *>(spriteram->ptr()), spriteram->bytes());

    // save states
    save_item(NAME(new { m_buffer }));
}
//-------------------------------------------------
//  set - configure from a memory share, forwarding
//  its pointer, byte size, bit width and
//  endianness along with the given bytes-per-entry
//-------------------------------------------------
public void set(memory_share share, int bpe)
{
    // unpack the share and delegate to the full-argument overload
    var shareptr = share.ptr();
    var sharebytes = (UInt32)share.bytes();
    set(shareptr, sharebytes, share.bitwidth(), share.endianness(), bpe);
}
// decoding

//-------------------------------------------------
//  decode_gfx - parse gfx decode info and
//  create gfx elements
//
//  For each decode entry: resolves the backing
//  region (RAM share or ROM region), copies the
//  layout, resolves FRAC_* placeholders against
//  the region size, expands X/Y offsets by the
//  scale factors, and allocates a gfx_element.
//  Sets m_decoded when done.
//-------------------------------------------------
void decode_gfx(gfx_decode_entry [] gfxdecodeinfo)
{
    // skip if nothing to do
    if (gfxdecodeinfo == null)
    {
        return;
    }

    // local variables to hold mutable copies of gfx layout data
    gfx_layout glcopy;
    std.vector <u32> extxoffs = new std.vector <u32>(0);
    std.vector <u32> extyoffs = new std.vector <u32>(0);

    // loop over all elements (stops at the first entry with no layout)
    for (u8 curgfx = 0; curgfx < digfx_global.MAX_GFX_ELEMENTS && curgfx < gfxdecodeinfo.Length && gfxdecodeinfo[curgfx].gfxlayout != null; curgfx++)
    {
        gfx_decode_entry gfx = gfxdecodeinfo[curgfx];

        // extract the scale factors and xormask
        u32 xscale = GFXENTRY_GETXSCALE(gfx.flags);
        u32 yscale = GFXENTRY_GETYSCALE(gfx.flags);
        u32 xormask = GFXENTRY_ISREVERSE(gfx.flags) ? 7U : 0U;

        // resolve the region
        u32 region_length;
        ListBytesPointer region_base;  //const u8 *region_base;
        u8 region_width;
        endianness_t region_endianness;
        if (gfx.memory_region != null)
        {
            // entry may name a region on this device or on our owner
            device_t basedevice = (GFXENTRY_ISDEVICE(gfx.flags)) ? device() : device().owner();
            if (GFXENTRY_ISRAM(gfx.flags))
            {
                memory_share share = basedevice.memshare(gfx.memory_region);
                //assert(share != NULL);
                region_length = (UInt32)(8 * share.bytes());  // length is tracked in bits
                region_base = share.ptr();
                //region_base = reinterpret_cast<u8 *>(share->ptr());
                region_width = share.bytewidth();
                region_endianness = share.endianness();
            }
            else
            {
                memory_region region = basedevice.memregion(gfx.memory_region);
                //assert(region != NULL);
                region_length = 8 * region.bytes();
                region_base = new ListBytesPointer(region.base_(), 0);
                //region_base = region->base();
                region_width = region.bytewidth();
                region_endianness = region.endianness();
            }
        }
        else
        {
            // no region: layout must be self-contained
            region_length = 0;
            region_base = null;
            region_width = 1;
            region_endianness = ENDIANNESS_NATIVE;
        }

        // opposite-endian regions fold a byte-swap into the xormask,
        // sized to the region's bus width
        if (region_endianness != ENDIANNESS_NATIVE)
        {
            switch (region_width)
            {
                case 2: xormask |= 0x08; break;
                case 4: xormask |= 0x18; break;
                case 8: xormask |= 0x38; break;
            }
        }

        // copy the layout into our temporary variable
        glcopy = new gfx_layout(gfx.gfxlayout);
        //memcpy(&glcopy, gfx.gfxlayout, sizeof(gfx_layout));

        // if the character count is a region fraction, compute the effective total
        if (IS_FRAC(glcopy.total))
        {
            //assert(region_length != 0);
            glcopy.total = region_length / glcopy.charincrement * FRAC_NUM(glcopy.total) / FRAC_DEN(glcopy.total);
        }

        // for non-raw graphics, decode the X and Y offsets
        if (glcopy.planeoffset[0] != digfx_global.GFX_RAW)
        {
            // copy the X and Y offsets into our temporary arrays
            // (sized up front for the post-scale dimensions)
            extxoffs.resize((int)(glcopy.width * xscale));
            extyoffs.resize((int)(glcopy.height * yscale));
            //memcpy(&extxoffs[0], (glcopy.extxoffs != null) ? glcopy.extxoffs : glcopy.xoffset, glcopy.width * sizeof(UInt32));
            //memcpy(&extyoffs[0], (glcopy.extyoffs != null) ? glcopy.extyoffs : glcopy.yoffset, glcopy.height * sizeof(UInt32));
            for (int i = 0; i < glcopy.width; i++)
            {
                extxoffs[i] = glcopy.extxoffs != null ? glcopy.extxoffs[i] : glcopy.xoffset[i];
            }
            for (int i = 0; i < glcopy.height; i++)
            {
                extyoffs[i] = glcopy.extyoffs != null ? glcopy.extyoffs[i] : glcopy.yoffset[i];
            }

            // always use the extended offsets here
            glcopy.extxoffs = extxoffs;
            glcopy.extyoffs = extyoffs;

            // expand X and Y by the scale factors; iterating backwards lets the
            // expansion run in place over the same array
            if (xscale > 1)
            {
                glcopy.width *= (UInt16)xscale;
                for (int j = glcopy.width - 1; j >= 0; j--)
                {
                    extxoffs[j] = extxoffs[(int)(j / xscale)];
                }
            }
            if (yscale > 1)
            {
                glcopy.height *= (UInt16)yscale;
                for (int j = glcopy.height - 1; j >= 0; j--)
                {
                    extyoffs[j] = extyoffs[(int)(j / yscale)];
                }
            }

            // loop over all the planes, converting fractions
            for (int j = 0; j < glcopy.planes; j++)
            {
                u32 value1 = glcopy.planeoffset[j];
                if (IS_FRAC(value1))
                {
                    //assert(region_length != 0);
                    glcopy.planeoffset[j] = FRAC_OFFSET(value1) + region_length * FRAC_NUM(value1) / FRAC_DEN(value1);
                }
            }

            // loop over all the X/Y offsets, converting fractions
            for (int j = 0; j < glcopy.width; j++)
            {
                u32 value2 = extxoffs[j];
                if (digfx_global.IS_FRAC(value2))
                {
                    //assert(region_length != 0);
                    extxoffs[j] = FRAC_OFFSET(value2) + region_length * FRAC_NUM(value2) / FRAC_DEN(value2);
                }
            }
            for (int j = 0; j < glcopy.height; j++)
            {
                u32 value3 = extyoffs[j];
                if (IS_FRAC(value3))
                {
                    //assert(region_length != 0);
                    extyoffs[j] = FRAC_OFFSET(value3) + region_length * FRAC_NUM(value3) / FRAC_DEN(value3);
                }
            }
        }

        // otherwise, just use the line modulo; shrink total until the last
        // element's final pixel fits inside the region
        else
        {
            int base_ = (int)gfx.start;
            int end = (int)(region_length / 8);
            int linemod = (int)glcopy.yoffset[0];
            while (glcopy.total > 0)
            {
                int elementbase = (int)(base_ + (glcopy.total - 1) * glcopy.charincrement / 8);
                int lastpixelbase = elementbase + glcopy.height * linemod / 8 - 1;
                if (lastpixelbase < end)
                {
                    break;
                }
                glcopy.total--;
            }
        }

        // allocate the graphics
        //m_gfx[curgfx] = new gfx_element(m_palette, glcopy, region_base != null ? region_base + gfx.start : null, xormask, gfx.total_color_codes, gfx.color_codes_start);
        m_gfx[curgfx] = new gfx_element(m_paletteDevice.target.palette_interface, glcopy, region_base != null ? new ListBytesPointer(region_base, (int)gfx.start) : null, xormask, gfx.total_color_codes, gfx.color_codes_start);
    }

    m_decoded = true;
}
//void write16_ext(offs_t offset, u16 data, u16 mem_mask = u16(~0));
//u32 read32(offs_t offset);
//void write32(offs_t offset, u32 data, u32 mem_mask = u32(~0));

// helper to update palette when data changed
//void update() { if (!m_init.isnull()) m_init(*this); }

// device-level overrides

//-------------------------------------------------
//  device_start - start up the device
//
//  Configures palette RAM from the memory share
//  named after our tag (plus an optional "_ext"
//  split share), validates any width/endianness
//  overrides, then runs the init helper if set.
//-------------------------------------------------
protected override void device_start()
{
    // bind the init function
    //m_init.resolve();

    // find the memory, if present
    memory_share ram = memshare(tag());
    if (ram != null)
    {
        // also look for the extended (split) share
        memory_share ram_ext = memshare(tag() + "_ext");

        // a format must have been configured before memory can be mapped
        int entry_bytes = m_raw_to_rgb.bytes_per_entry();
        if (entry_bytes <= 0)
            throw new emu_fatalerror("palette_device({0}): Palette has memory share but no format specified", tag());

        // a single share holds whole entries; a split pair holds half each
        if (ram_ext != null)
        {
            m_paletteram.set(ram, entry_bytes / 2);
            m_paletteram_ext.set(ram_ext, entry_bytes / 2);
        }
        else
        {
            m_paletteram.set(ram, entry_bytes);
        }

        // apply a caller-supplied memory width override
        if (m_membits_supplied)
        {
            // forcing width only makes sense when narrower than the native bus width
            if (m_membits >= ram.bitwidth())
                throw new emu_fatalerror("palette_device({0}): Improper use of MCFG_PALETTE_MEMBITS", tag());

            m_paletteram.set_membits(m_membits);
            if (ram_ext != null)
                m_paletteram_ext.set_membits(m_membits);
        }

        // apply a caller-supplied endianness override
        if (m_endianness_supplied)
        {
            // only sensible when the RAM is narrower than the palette format and not split
            if (ram_ext != null || (m_paletteram.membits() / 8) >= entry_bytes)
                throw new emu_fatalerror("palette_device({0}): Improper use of MCFG_PALETTE_ENDIANNESS", tag());

            m_paletteram.set_endianness(m_endianness);
        }
    }

    // call the initialization helper if present
    if (m_init != null)
        m_init(this);
}