+ /* pointer, length and actual playing position of sound sample */
+ sample_ptr = mixer[i].data_ptr;
+ sample_len = mixer[i].data_len;
+ sample_pos = mixer[i].playingpos;
+ sample_size = MIN(max_sample_size, sample_len - sample_pos);
+ mixer[i].playingpos += sample_size;
+
+ /* copy original sample to first mixing buffer */
+ CopySampleToMixingBuffer(&mixer[i], sample_pos, sample_size,
+ premix_first_buffer);
+
+ /* are we about to restart a looping sound? */
+ if (IS_LOOP(mixer[i]) && sample_size < max_sample_size)
+ {
+ while (sample_size < max_sample_size)
+ {
+ int restarted_sample_size =
+ MIN(max_sample_size - sample_size, sample_len);
+
+ if (mixer[i].format == AUDIO_FORMAT_U8)
+ for (j=0; j<restarted_sample_size; j++)
+ premix_first_buffer[sample_size + j] =
+ ((short)(((byte *)sample_ptr)[j] ^ 0x80)) << 8;
+ else
+ for (j=0; j<restarted_sample_size; j++)
+ premix_first_buffer[sample_size + j] =
+ ((short *)sample_ptr)[j];
+
+ mixer[i].playingpos = restarted_sample_size;
+ sample_size += restarted_sample_size;
+ }
+ }
+
+ /* decrease volume if sound is fading out */
+ if (IS_FADING(mixer[i]) &&
+ mixer[i].volume >= SOUND_FADING_VOLUME_THRESHOLD)
+ mixer[i].volume -= SOUND_FADING_VOLUME_STEP;
+
+ /* adjust volume of actual sound sample */
+ if (mixer[i].volume != PSND_MAX_VOLUME)
+ for(j=0; j<sample_size; j++)
+ premix_first_buffer[j] =
+ (mixer[i].volume * (long)premix_first_buffer[j])
+ >> PSND_MAX_VOLUME_BITS;
+
+ /* fill the last mixing buffer with stereo or mono sound */
+ if (stereo)
+ {
+ int middle_pos = PSND_MAX_LEFT2RIGHT / 2;
+ int left_volume = stereo_volume[middle_pos + mixer[i].stereo];
+ int right_volume= stereo_volume[middle_pos - mixer[i].stereo];
+
+ for(j=0; j<sample_size; j++)
+ {
+ premix_left_buffer[j] =
+ (left_volume * premix_first_buffer[j])
+ >> PSND_MAX_LEFT2RIGHT_BITS;
+ premix_right_buffer[j] =
+ (right_volume * premix_first_buffer[j])
+ >> PSND_MAX_LEFT2RIGHT_BITS;
+
+ premix_last_buffer[2 * j + 0] += premix_left_buffer[j];
+ premix_last_buffer[2 * j + 1] += premix_right_buffer[j];
+ }
+ }
+ else
+ {
+ for(j=0; j<sample_size; j++)
+ premix_last_buffer[j] += premix_first_buffer[j];
+ }
+
+ /* delete completed sound entries from the mixer */
+ if (mixer[i].playingpos >= mixer[i].data_len)
+ {
+ if (IS_LOOP(mixer[i]))
+ mixer[i].playingpos = 0;
+ else
+ Mixer_RemoveSound(i);
+ }
+ else if (mixer[i].volume <= SOUND_FADING_VOLUME_THRESHOLD)
+ Mixer_RemoveSound(i);
+ }
+
+ /* prepare final playing buffer according to system audio format */
+ for(i=0; i<max_sample_size * (stereo ? 2 : 1); i++)
+ {
+ /* cut off at 17 bit value */
+ if (premix_last_buffer[i] < -65535)
+ premix_last_buffer[i] = -65535;
+ else if (premix_last_buffer[i] > 65535)
+ premix_last_buffer[i] = 65535;
+
+ /* shift to 16 bit value */
+ premix_last_buffer[i] >>= 1;
+
+ if (afmt.format & AUDIO_FORMAT_U8)
+ {
+ playing_buffer[i] = (premix_last_buffer[i] >> 8) ^ 0x80;
+ }
+ else if (afmt.format & AUDIO_FORMAT_LE) /* 16 bit */
+ {
+ playing_buffer[2 * i + 0] = premix_last_buffer[i] & 0xff;
+ playing_buffer[2 * i + 1] = premix_last_buffer[i] >> 8;
+ }
+ else /* big endian */
+ {
+ playing_buffer[2 * i + 0] = premix_last_buffer[i] >> 8;
+ playing_buffer[2 * i + 1] = premix_last_buffer[i] & 0xff;
+ }
+ }
+
+ /* finally play the sound fragment */
+ write(audio.device_fd, playing_buffer, fragment_size);
+
+ if (!mixer_active_channels)
+ CloseAudioDevice(&audio.device_fd);
+}
+
+#else /* !AUDIO_STREAMING_DSP */
+
+static int Mixer_Main_SimpleAudio(struct SoundControl snd_ctrl)
+{
+ static short premix_first_buffer[SND_BLOCKSIZE];
+ static byte playing_buffer[SND_BLOCKSIZE];
+ int max_sample_size = SND_BLOCKSIZE;
+ void *sample_ptr;
+ int sample_len;
+ int sample_pos;
+ int sample_size;
+ int i, j;
+
+ i = 1;
+
+ /* pointer, lenght and actual playing position of sound sample */
+ sample_ptr = mixer[i].data_ptr;
+ sample_len = mixer[i].data_len;
+ sample_pos = mixer[i].playingpos;
+ sample_size = MIN(max_sample_size, sample_len - sample_pos);
+ mixer[i].playingpos += sample_size;
+
+ /* copy original sample to first mixing buffer */
+ CopySampleToMixingBuffer(&mixer[i], sample_pos, sample_size,
+ premix_first_buffer);
+
+ /* adjust volume of actual sound sample */
+ if (mixer[i].volume != PSND_MAX_VOLUME)
+ for(j=0; j<sample_size; j++)
+ premix_first_buffer[j] =
+ (mixer[i].volume * (long)premix_first_buffer[j])
+ >> PSND_MAX_VOLUME_BITS;
+
+ /* might be needed for u-law /dev/audio */
+#if 0
+ for(j=0; j<sample_size; j++)
+ playing_buffer[j] =
+ linear_to_ulaw(premix_first_buffer[j]);
+#endif
+
+ /* delete completed sound entries from the mixer */
+ if (mixer[i].playingpos >= mixer[i].data_len)
+ Mixer_RemoveSound(i);
+
+ for(i=0; i<sample_size; i++)
+ playing_buffer[i] = (premix_first_buffer[i] >> 8) ^ 0x80;
+
+ /* finally play the sound fragment */
+ write(audio.device_fd, playing_buffer, sample_size);
+
+ return sample_size;
+}
+#endif /* !AUDIO_STREAMING_DSP */
+
/* Main loop of the forked sound server (mixer) process: reads sound
   control commands from the parent through the mixer pipe and keeps
   the active channels playing on the audio device.  Never returns. */
void Mixer_Main()
{
  struct SoundControl snd_ctrl;
  fd_set mixer_fdset;

  close(audio.mixer_pipe[1]);	/* no writing into pipe needed */

  Mixer_InitChannels();

#if defined(PLATFORM_HPUX)
  /* NOTE(review): HP-UX apparently keeps the device open permanently,
     so it is initialized once up front here — confirm */
  InitAudioDevice(&afmt);
#endif

  FD_ZERO(&mixer_fdset);
  FD_SET(audio.mixer_pipe[0], &mixer_fdset);

  while(1)	/* wait for sound playing commands from client */
  {
    /* zero timeout: the inner select() calls below merely poll the pipe */
    struct timeval delay = { 0, 0 };

    /* select() modifies the fd set, so re-arm it before each call */
    FD_SET(audio.mixer_pipe[0], &mixer_fdset);
    /* block (NULL timeout) until a command arrives on the pipe */
    select(audio.mixer_pipe[0] + 1, &mixer_fdset, NULL, NULL, NULL);
    if (!FD_ISSET(audio.mixer_pipe[0], &mixer_fdset))
      continue;

    ReadSoundControlFromPipe(&snd_ctrl);

    HandleSoundRequest(snd_ctrl);

#if defined(AUDIO_STREAMING_DSP)

    /* keep streaming mixed fragments to the DSP device while channels
       are active and no new command is pending on the pipe */
    while (mixer_active_channels &&
	   select(audio.mixer_pipe[0] + 1,
		  &mixer_fdset, NULL, NULL, &delay) < 1)
    {
      FD_SET(audio.mixer_pipe[0], &mixer_fdset);

      Mixer_Main_DSP();
    }

#else /* !AUDIO_STREAMING_DSP */

    /* non-streaming devices can only handle single, non-looping sounds;
       also skip the request if the audio device cannot be opened */
    if (!snd_ctrl.active || IS_LOOP(snd_ctrl) ||
	(audio.device_fd = OpenAudioDevice(audio.device_name)) < 0)
      continue;

    InitAudioDevice(&afmt);

    delay.tv_sec = 0;
    delay.tv_usec = 0;

    while (mixer_active_channels &&
	   select(audio.mixer_pipe[0] + 1,
		  &mixer_fdset, NULL, NULL, &delay) < 1)
    {
      int wait_percent = 90;	/* wait 90% of the real playing time */
      int sample_size;

      FD_SET(audio.mixer_pipe[0], &mixer_fdset);

      sample_size = Mixer_Main_SimpleAudio(snd_ctrl);

      /* sleep (via the select() timeout) for ~90% of the fragment's
	 playing time: sample_size samples at afmt.sample_rate Hz,
	 scaled to microseconds (10 * 90 / rate * 1000) */
      delay.tv_sec = 0;
      delay.tv_usec =
	((sample_size * 10 * wait_percent) / afmt.sample_rate) * 1000;
    }

    CloseAudioDevice(&audio.device_fd);

    Mixer_InitChannels();	/* remove all sounds from mixer */

#endif /* !AUDIO_STREAMING_DSP */
  }
}
+#endif /* PLATFORM_UNIX */
+
+
+/* ------------------------------------------------------------------------- */
+/* platform dependent audio initialization code */
+/* ------------------------------------------------------------------------- */
+
+#if defined(AUDIO_LINUX_IOCTL)
+static void InitAudioDevice_Linux(struct AudioFormatInfo *afmt)
+{
+ /* "ioctl()" expects pointer to 'int' value for stereo flag
+ (boolean is defined as 'char', which will not work here) */
+ unsigned int fragment_spec = 0;
+ int fragment_size_query;
+ int stereo = TRUE;
+ struct
+ {
+ int format_ioctl;
+ int format_result;
+ }
+ formats[] =
+ {
+ /* supported audio format in preferred order */
+ { AFMT_S16_LE, AUDIO_FORMAT_S16 | AUDIO_FORMAT_LE },
+ { AFMT_S16_BE, AUDIO_FORMAT_S16 | AUDIO_FORMAT_BE },
+ { AFMT_U8, AUDIO_FORMAT_U8 },
+ { -1, -1 }
+ };
+ int i;
+
+ /* determine logarithm (log2) of the fragment size */
+ while ((1 << fragment_spec) < afmt->fragment_size)
+ fragment_spec++;
+
+ /* use two fragments (play one fragment, prepare the other);
+ one fragment would result in interrupted audio output, more
+ than two fragments would raise audio output latency to much */
+ fragment_spec |= 0x00020000;
+
+ /* Example for fragment specification:
+ - 2 buffers / 512 bytes (giving 1/16 second resolution for 8 kHz)
+ - (with stereo the effective buffer size will shrink to 256)
+ => fragment_size = 0x00020009 */
+
+ if (ioctl(audio.device_fd, SNDCTL_DSP_SETFRAGMENT, &fragment_spec) < 0)
+ Error(ERR_EXIT_SOUND_SERVER,
+ "cannot set fragment size of /dev/dsp -- no sounds");
+
+ i = 0;
+ afmt->format = 0;
+ while (formats[i].format_result != -1)
+ {
+ unsigned int audio_format = formats[i].format_ioctl;
+ if (ioctl(audio.device_fd, SNDCTL_DSP_SETFMT, &audio_format) == 0)
+ {
+ afmt->format = formats[i].format_result;
+ break;
+ }
+ }
+
+ if (afmt->format == 0) /* no supported audio format found */
+ Error(ERR_EXIT_SOUND_SERVER,
+ "cannot set audio format of /dev/dsp -- no sounds");
+
+ /* try if we can use stereo sound */
+ afmt->stereo = TRUE;
+ if (ioctl(audio.device_fd, SNDCTL_DSP_STEREO, &stereo) < 0)
+ afmt->stereo = FALSE;
+
+ if (ioctl(audio.device_fd, SNDCTL_DSP_SPEED, &afmt->sample_rate) < 0)
+ Error(ERR_EXIT_SOUND_SERVER,
+ "cannot set sample rate of /dev/dsp -- no sounds");
+
+ /* get the real fragmentation size; this should return 512 */
+ if (ioctl(audio.device_fd, SNDCTL_DSP_GETBLKSIZE, &fragment_size_query) < 0)
+ Error(ERR_EXIT_SOUND_SERVER,
+ "cannot get fragment size of /dev/dsp -- no sounds");
+ if (fragment_size_query != afmt->fragment_size)
+ Error(ERR_EXIT_SOUND_SERVER,
+ "cannot set fragment size of /dev/dsp -- no sounds");
+}
+#endif /* AUDIO_LINUX_IOCTL */
+
+#if defined(PLATFORM_NETBSD)
+static void InitAudioDevice_NetBSD(struct AudioFormatInfo *afmt)
+{
+ audio_info_t a_info;
+ boolean stereo = TRUE;
+
+ AUDIO_INITINFO(&a_info);
+ a_info.play.encoding = AUDIO_ENCODING_LINEAR8;