/* Blend an artwork overlay onto a destination bitmap.
   Every destination pixel that is not pen 0 (black) is replaced by the
   corresponding overlay pixel; black pixels are left untouched.  Both
   8-bit (one byte per pixel) and 16-bit (two bytes per pixel) destination
   depths are handled. */
public static void overlay_draw(osd_bitmap dest, artwork overlay) {
    osd_bitmap src = overlay._artwork;
    int rows = src.height;
    int cols = src.width;
    int black = Machine.pens[0];

    if (dest.depth == 8) {
        /* 8 bpp path: byte-wide pixels */
        for (int y = 0; y < rows; y++) {
            _BytePtr d = new _BytePtr(dest.line[y]);
            _BytePtr s = new _BytePtr(src.line[y]);
            for (int x = 0; x < cols; x++) {
                if (d[0] != black)
                    d[0] = s[0];
                d.offset++;
                s.offset++;
            }
        }
    } else {
        /* 16 bpp path: word-wide pixels, offsets advance in bytes */
        for (int y = 0; y < rows; y++) {
            _ShortPtr d = new _ShortPtr(dest.line[y]);
            _ShortPtr s = new _ShortPtr(src.line[y]);
            for (int x = 0; x < cols; x++) {
                if (d.read16(0) != black)
                    d.write16(0, s.read16(0));
                d.offset += 2;
                s.offset += 2;
            }
        }
    }
}
/* Apply a single-pole RC low-pass filter to a buffer of signed 16-bit
   samples in place.
   r1/r2/r3 give the channel's resistor network (ohms), c[] the capacitor
   in pF; the last output sample is carried across calls in memory[] so
   the filter is continuous between buffers.  A channel with no capacitor
   is passed through untouched. */
void apply_RC_filter(int channel, _ShortPtr buf, int len, int sample_rate) {
    if (c[channel] == 0) return; /* filter disabled */

    float R1 = r1[channel];
    float R2 = r2[channel];
    float R3 = r3[channel];
    float C = (float)(c[channel] * 1E-12); /* convert pF to F */

    /* Cut Frequency = 1/(2*Pi*Req*C); Req is the equivalent resistance */
    float Req = (R1 * (R2 + R3)) / (R1 + R2 + R3);

    /* one-pole IIR coefficient in 16.16 fixed point:
       y[i] = y[i-1] + (x[i] - y[i-1]) done as K-weighted blend */
    int K = (int)(0x10000 * Math.Exp(-1 / (Req * C) / sample_rate));

    /* BUGFIX: read16() returns an unsigned 16-bit value, but the buffer
       holds signed PCM.  Without the (short) sign-extension casts, a
       negative sample such as -1 (0xFFFF) was filtered as +65535, so the
       fixed-point blend produced wildly wrong results for the whole
       negative half of the waveform. */
    buf.write16(0, (ushort)((short)buf.read16(0) + (memory[channel] - (short)buf.read16(0)) * K / 0x10000));
    for (int i = 1; i < len; i++)
        buf.write16(i, (ushort)((short)buf.read16(i) + ((short)buf.read16(i - 1) - (short)buf.read16(i)) * K / 0x10000));

    /* sign-extend the carried-over state too, so the next buffer starts
       from the true signed value of the last output sample */
    memory[channel] = (short)buf.read16(len - 1);
}
/* Namco custom sound: render `length` mono 16-bit samples into `buffer`.
   Each active voice is accumulated into the shared mixer_buffer; the
   accumulated sum is then passed through mixer_lookup (a clamping volume
   table indexed from its middle) into the output buffer.
   Two voice types exist:
     - noise voices (noise_sw != 0): a +/-7 square output driven by a
       17-bit LFSR clocked from a 12-bit fractional counter;
     - wavetable voices: 4-bit samples read from the voice's 32-entry
       waveform, packed either one or two samples per byte depending on
       samples_per_byte.
   NOTE(review): `ch` is unused here — presumably kept for the stream
   callback signature; confirm against the stream registration. */
void namco_update_mono(int ch, _ShortPtr buffer, int length) {
    _ShortPtr mix;

    /* if no sound, we're done — emit silence (length 16-bit samples) */
    if (!sound_enable) {
        Array.Clear(buffer.buffer, buffer.offset, length*2);
        return;
    }

    /* zap the contents of the mixer buffer */
    Array.Clear(mixer_buffer.buffer, mixer_buffer.offset, length * 2);

    /* loop over each voice and add its contribution */
    for (int voice=0;voice<last_channel;voice++)
    {
        int f = channel_list[voice].frequency;
        int v = channel_list[voice].volume[0];
        mix = new _ShortPtr(mixer_buffer);

        if (channel_list[voice].noise_sw != 0) {
            /* only update if we have non-zero volume and frequency */
            if (v != 0 && (f & 0xff) != 0) {
                /* noise rate scales with the low 8 bits of the frequency
                   register, stepped in 12-bit fractional increments */
                float fbase = (float)sample_rate / (float)namco_clock;
                int delta = (int)((float)((f & 0xff) << 4) * fbase);
                int c = channel_list[voice].noise_counter;

                /* add our contribution */
                for (int i = 0; i < length; i++) {
                    int noise_data;
                    int cnt;
                    /* square output: +7 or -7 scaled by half volume */
                    if (channel_list[voice].noise_state != 0)
                        noise_data = 0x07;
                    else
                        noise_data = -0x07;
                    mix.write16(0, (ushort)((short)mix.read16(0) + noise_data * (v >> 1)));
                    mix.offset += 2;

                    /* advance the fractional counter; each whole step
                       clocks the LFSR once */
                    c += delta;
                    cnt = (c >> 12);
                    c &= (1 << 12) - 1;
                    for (; cnt > 0; cnt--) {
                        /* 17-bit LFSR with taps producing the noise bit */
                        if (((channel_list[voice].noise_seed + 1) & 2) != 0) channel_list[voice].noise_state ^= 1;
                        if ((channel_list[voice].noise_seed & 1) != 0) channel_list[voice].noise_seed ^= 0x28000;
                        channel_list[voice].noise_seed >>= 1;
                    }
                }

                /* update the counter for this voice */
                channel_list[voice].noise_counter = c;
            }
        } else {
            /* only update if we have non-zero volume and frequency */
            if (v != 0 && f != 0) {
                int c = channel_list[voice].counter;

                /* add our contribution */
                for (int i = 0; i < length; i++) {
                    /* top bits of the 20-bit phase pick one of 32 wave
                       entries */
                    c += f;
                    int offs = (c >> 15) & 0x1f;
                    if (samples_per_byte == 1) /* use only low 4 bits */
                    {
                        mix.write16(0, (ushort)((short)mix.read16(0) + ((channel_list[voice].wave[offs] & 0x0f) - 8) * v));
                        mix.offset += 2;
                    }
                    else /* use full byte, first the high 4 bits, then the low 4 bits */
                    {
                        if ((offs & 1) != 0) {
                            mix.write16(0, (ushort)((short)mix.read16(0) + ((channel_list[voice].wave[offs >> 1] & 0x0f) - 8) * v));
                            mix.offset += 2;
                        } else {
                            mix.write16(0, (ushort)((short)mix.read16(0) + (((channel_list[voice].wave[offs >> 1] >> 4) & 0x0f) - 8) * v));
                            mix.offset += 2;
                        }
                    }
                }

                /* update the counter for this voice */
                channel_list[voice].counter = c;
            }
        }
    }

    /* mix it down: map each accumulated sample through the clamping
       volume table, indexed relative to its middle */
    mix = new _ShortPtr(mixer_buffer);
    for (int i = 0; i < length; i++) {
        buffer.write16(0, (ushort)mixer_lookup[mixer_lookup_middle+(short)mix.read16(0)]);
        buffer.offset += 2;
        mix.offset += 2;
    }
}
/* Produce `length` sample-rate-converted output samples for ADPCM voice
   `num` into `buffer`, linearly interpolating between consecutive decoded
   source samples.  Interpolation state (the current sample pair and the
   fractional source position) persists on the voice across calls. */
public static void adpcm_update(int num, _ShortPtr buffer, int length) {
    ADPCMVoice voice = _adpcm[num];

    /* scratch chunk for freshly decoded samples, plus a read cursor */
    _ShortPtr chunk = new _ShortPtr(MAX_SAMPLE_CHUNK * 2);
    _ShortPtr reader = new _ShortPtr(chunk);

    /* the pair of source samples we interpolate between */
    short s0 = voice.last_sample;
    short s1 = voice.curr_sample;
    uint final_pos;
    uint new_samples;

    /* first drain the partially consumed pair left over from last call */
    if (voice.source_pos > 0) {
        while (length > 0 && voice.source_pos < FRAC_ONE) {
            buffer.write16(0, (ushort)((((int)s0 * (FRAC_ONE - voice.source_pos)) + ((int)s1 * voice.source_pos)) >> FRAC_BITS));
            buffer.offset += 2;
            voice.source_pos += voice.source_step;
            length--;
        }
        /* output filled before the pair was exhausted — nothing new to decode */
        if (voice.source_pos < FRAC_ONE)
            return;
        voice.source_pos -= FRAC_ONE;
    }

    /* decode just enough new source samples to satisfy the request */
    final_pos = (uint)(voice.source_pos + length * voice.source_step);
    new_samples = (final_pos + FRAC_ONE - 1) >> FRAC_BITS;
    if (new_samples > MAX_SAMPLE_CHUNK)
        new_samples = MAX_SAMPLE_CHUNK;
    generate_adpcm(voice, chunk, (int)new_samples);

    /* shift the pair onto the freshly decoded data */
    s0 = s1;
    s1 = (short)reader.read16(0);
    reader.offset += 2;

    /* resample with linear interpolation until the output is full */
    while (length > 0) {
        while (length > 0 && voice.source_pos < FRAC_ONE) {
            buffer.write16(0, (ushort)((((int)s0 * (FRAC_ONE - voice.source_pos)) + ((int)s1 * voice.source_pos)) >> FRAC_BITS));
            buffer.offset += 2;
            voice.source_pos += voice.source_step;
            length--;
        }
        /* crossed a whole source step: pull in the next sample */
        if (voice.source_pos >= FRAC_ONE) {
            voice.source_pos -= FRAC_ONE;
            s0 = s1;
            s1 = (short)reader.read16(0);
            reader.offset += 2;
        }
    }

    /* stash the interpolation pair for the next call */
    voice.last_sample = s0;
    voice.curr_sample = s1;
}
/* Mix a chunk of streamed 16-bit sample data into the left/right mixer
   accumulators for channel `ch`, rate-converting from `freq` to the
   machine sample rate.  `len` is the chunk size in bytes.  The channel's
   fractional input position and available-sample count are updated so
   successive chunks join seamlessly. */
public static void mixer_play_streamed_sample_16(int ch, _ShortPtr data, int len, int freq) {
    /* skip if sound is off */
    if (Machine.sample_rate == 0)
        return;

    mixer_channel[ch].is_stream = true;

    /* overall mixing volume in 8.8 fixed point (zero when muted) */
    int vol = 0;
    if (mixer_sound_enabled)
        vol = ((mixer_channel[ch].volume * mixer_channel[ch].mixing_level * 256) << mixer_channel[ch].gain) / (100 * 100);

    /* recompute the rate-conversion step only when the frequency changes */
    if (freq != mixer_channel[ch].frequency) {
        mixer_channel[ch].frequency = (uint)freq;
        mixer_channel[ch].step_size = (uint)((double)freq * (double)(1 << FRACTION_BITS) / (double)Machine.sample_rate);
    }
    uint step = mixer_channel[ch].step_size;

    /* resume where the previous chunk left off */
    uint in_frac = mixer_channel[ch].input_frac;
    uint out_pos = (accum_base + mixer_channel[ch].samples_available) & ACCUMULATOR_MASK;

    /* express the chunk length in input samples, fractional form */
    len = (len / 2) << FRACTION_BITS;
    uint mixed = 0;

    if (!is_stereo || mixer_channel[ch].pan == MIXER_PAN_LEFT) {
        /* mono or hard-left pan: left accumulator only */
        while (in_frac < len) {
            left_accum[out_pos] += ((short)data.read16((int)(in_frac >> FRACTION_BITS)) * vol) >> 8;
            in_frac += step;
            out_pos = (out_pos + 1) & ACCUMULATOR_MASK;
            mixed++;
        }
    } else if (mixer_channel[ch].pan == MIXER_PAN_RIGHT) {
        /* hard-right pan: right accumulator only */
        while (in_frac < len) {
            right_accum[out_pos] += ((short)data.read16((int)(in_frac >> FRACTION_BITS)) * vol) >> 8;
            in_frac += step;
            out_pos = (out_pos + 1) & ACCUMULATOR_MASK;
            mixed++;
        }
    } else {
        /* centered: identical contribution to both accumulators */
        while (in_frac < len) {
            int sample = ((short)data.read16((int)(in_frac >> FRACTION_BITS)) * vol) >> 8;
            left_accum[out_pos] += sample;
            right_accum[out_pos] += sample;
            in_frac += step;
            out_pos = (out_pos + 1) & ACCUMULATOR_MASK;
            mixed++;
        }
    }

    /* carry the fractional position and sample count forward */
    mixer_channel[ch].input_frac = in_frac & FRACTION_MASK;
    mixer_channel[ch].samples_available += mixed;
}
/* Mix up to `samples_to_generate` 16-bit samples from one (possibly
   looping) mixer channel into the left/right accumulators, advancing the
   source position by the channel's fixed-point step size.
   NOTE(review): unlike mixer_play_streamed_sample_16, read16(0) is used
   here without a (short) sign-extension cast — looks like negative
   samples would be mixed as large positive values; confirm whether
   _ShortPtr.read16 sign-extends, or this is a latent bug. */
static void mix_sample_16(mixer_channel_data channel, int samples_to_generate) {
    uint step_size, input_frac, output_pos;
    _ShortPtr source;
    int source_end;
    int mixing_volume;

    /* compute the overall mixing volume (8.8 fixed point; zero if muted) */
    if (mixer_sound_enabled)
        mixing_volume = ((channel.volume * channel.mixing_level * 256) << channel.gain) / (100 * 100);
    else
        mixing_volume = 0;

    /* get the initial state */
    step_size = channel.step_size;
    source = new _ShortPtr(channel.data_current);
    source_end = channel.data_end;
    input_frac = channel.input_frac;
    output_pos = (accum_base + channel.samples_available) & ACCUMULATOR_MASK;

    /* an outer loop to handle looping samples */
    while (samples_to_generate > 0) {
        /* if we're mono or left panning, just mix to the left channel */
        if (!is_stereo || channel.pan == MIXER_PAN_LEFT) {
            while (source.offset < source_end && samples_to_generate > 0) {
                left_accum[output_pos] += (source.read16(0) * mixing_volume) >> 8;
                /* whole-sample steps come out of the fraction; offset is in bytes */
                input_frac += step_size;
                source.offset += (int)(input_frac >> FRACTION_BITS) * 2;
                input_frac &= FRACTION_MASK;
                output_pos = (output_pos + 1) & ACCUMULATOR_MASK;
                samples_to_generate--;
            }
        }
        /* if we're right panning, just mix to the right channel */
        else if (channel.pan == MIXER_PAN_RIGHT) {
            while (source.offset < source_end && samples_to_generate > 0) {
                right_accum[output_pos] += (source.read16(0) * mixing_volume) >> 8;
                input_frac += step_size;
                source.offset += (int)(input_frac >> FRACTION_BITS) * 2;
                input_frac &= FRACTION_MASK;
                output_pos = (output_pos + 1) & ACCUMULATOR_MASK;
                samples_to_generate--;
            }
        }
        /* if we're stereo center, mix to both channels */
        else {
            while (source.offset < source_end && samples_to_generate > 0) {
                int mixing_value = (source.read16(0) * mixing_volume) >> 8;
                left_accum[output_pos] += mixing_value;
                right_accum[output_pos] += mixing_value;
                input_frac += step_size;
                source.offset += (int)(input_frac >> FRACTION_BITS) * 2;
                input_frac &= FRACTION_MASK;
                output_pos = (output_pos + 1) & ACCUMULATOR_MASK;
                samples_to_generate--;
            }
        }
        /* handle the end case */
        if (source.offset >= source_end) {
            /* if we're done, stop playing */
            if (!channel.is_looping) {
                channel.is_playing = false;
                break;
            }
            /* if we're looping, wrap to the beginning.
               NOTE(review): the original C was
               `source -= (INT16*)source_end - (INT16*)channel.data_start;`
               — subtracting only source_end is correct only if
               channel.data_start is 0; verify for looping samples whose
               data does not start at offset 0. */
            else
                source.offset -= source_end;
        }
    }

    /* update the final positions */
    channel.input_frac = input_frac;
    channel.data_current = source.offset;
}
/* K005289 wavetable sound: render `length` samples into `buffer`.
   Both hardware voices are accumulated into the shared mixer_buffer and
   the sum is mapped through the mixer_lookup volume table.
   BUGFIXES vs. the previous version:
     1. Neither voice loop advanced mix.offset, so all `length`
        contributions of a voice piled into the first mixer slot while the
        rest of the buffer stayed silent (compare namco_update_mono in
        this file, which advances by 2 bytes per sample).
     2. Voice 1 did `mix = mixer_buffer;`, aliasing the shared pointer
        instead of taking a fresh cursor like voice 0, which would corrupt
        mixer_buffer.offset once the cursor actually moves. */
static void K005289_update(int ch, _ShortPtr buffer, int length) {
    _ShortPtr mix;

    /* zap the contents of the mixer buffer */
    Array.Clear(mixer_buffer.buffer, mixer_buffer.offset, length * sizeof(short));

    /* voice 0 */
    int v = channel_list[0].volume;
    int f = channel_list[0].frequency;
    if (v != 0 && f != 0) {
        _BytePtr w = channel_list[0].wave;
        int c = channel_list[0].counter;
        mix = new _ShortPtr(mixer_buffer);

        /* add our contribution: 4-bit wave samples, centered and scaled */
        for (int i = 0; i < length; i++) {
            int offs;
            c += (int)((((float)mclock / (float)(f * 16)) * (float)(1 << FREQBASEBITS)) / (float)(rate / 32));
            offs = (c >> 16) & 0x1f;
            ushort _w = mix.read16(0);
            mix.write16(0, (ushort)(_w + (short)(((w[offs] & 0x0f) - 8) * v)));
            mix.offset += 2; /* BUGFIX: advance to the next output sample */
        }

        /* update the counter for this voice */
        channel_list[0].counter = c;
    }

    /* voice 1 */
    v = channel_list[1].volume;
    f = channel_list[1].frequency;
    if (v != 0 && f != 0) {
        _BytePtr w = channel_list[1].wave;
        int c = channel_list[1].counter;
        mix = new _ShortPtr(mixer_buffer); /* BUGFIX: fresh cursor, do not alias mixer_buffer */

        /* add our contribution */
        for (int i = 0; i < length; i++) {
            int offs;
            c += (int)((((float)mclock / (float)(f * 16)) * (float)(1 << FREQBASEBITS)) / (float)(rate / 32));
            offs = (c >> 16) & 0x1f;
            ushort _w = mix.read16(0);
            mix.write16(0, (ushort)(_w + (short)(((w[offs] & 0x0f) - 8) * v)));
            mix.offset += 2; /* BUGFIX: advance to the next output sample */
        }

        /* update the counter for this voice */
        channel_list[1].counter = c;
    }

    /* mix it down through the volume lookup table.
       NOTE(review): the lookup index is signed — presumably mixer_lookup
       is pre-offset to the middle of its table; confirm at its creation. */
    mix = new _ShortPtr(mixer_buffer);
    for (int i = 0; i < length; i++) {
        buffer.write16(0, (ushort)mixer_lookup.read16((short)mix.read16(0)));
        buffer.offset += 2;
        mix.offset += 2;
    }
}