Click here to Skip to main content
15,886,362 members
Articles / Desktop Programming / MFC

SDI (Sound Device Interface)--A library for Auditory Display

Rate me:
Please Sign up or sign in to vote.
4.60/5 (3 votes)
2 May 2004 · 6 min read · 110.2K views   3.7K downloads   45 bookmarks
A GDI-like API for 3D positioning of speech, and MIDI composition using a single string.
/******************************************************************************
SDI 1.0
    A library for Auditory Display
Copyright 2004 Dong Lin

This file is part of SDI.

    SDI is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    SDI is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with SDI; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

Last Updated : Apr.15th, 2004

For any question, suggestion or failure report, please contact me by:
e-mail: jonathan1983@126.com
*******************************************************************************/
/***************************************************************************
[Copy Right Info]

File Name: SDI.cpp

Description:
	Implementation of SDI Environment, speech object and earcon object
	The core code of SDI is laid out below.

***************************************************************************/

#include "Stdafx.h"
#include <objbase.h>
#include <initguid.h>
#include <process.h>
#include <mmreg.h>
#include <mmsystem.h>
#include <stdio.h>
#include <dxerr9.h>
#include <math.h>
#include <string.h>

#include "ThirdParty\\eci.h"
#include "ThirdParty\\ia3dapi.h"
#include "ThirdParty\\ia3dutil.h"

#include "SDI.h"		// SDI.h must be included before SDIKernel.h
#include "SDIKernel.h"
#include "Speech.h"
#include "Earcon.h"


// Constants
// Initial sizes (bytes) of the private heaps holding handle/object records.
const int INIT_HANDLE_HEAP_SIZE=4*1024;
const int INIT_SPEECH_HEAP_SIZE=4*1024;
const int INIT_EARCON_HEAP_SIZE=4*1024;

// Threading
typedef unsigned int (__stdcall *PTHREADPROC)(void *);
typedef DWORD (__stdcall *SDICALLBACK)(DWORD dwMsg,LPVOID pData);

// Wrapper around _beginthreadex (preferred over CreateThread so the CRT's
// per-thread state is initialized correctly).
#define BEGINTHREADEX(psa,cbStack,pfnStartAddr,pvParam,fdwCreate,pdwThreadID) \
	((HANDLE)_beginthreadex((void*)(psa),(unsigned int)(cbStack),			  \
	(PTHREADPROC)(pfnStartAddr),(void*)(pvParam),(unsigned int)(fdwCreate),	  \
	(unsigned int*)(pdwThreadID)))			

/***********************
 Global Variables
***********************/
// SDI Environment.
LPGUID			g_audioDevice=NULL;	// copy of the caller's audio-device GUID (may stay NULL)
DWORD			g_dwA3dStyle=0;		// A3D style flags passed to InitializeSDI
IA3d5*			g_pA3d=NULL;		// root A3D interface

// Earcon
IDirectMusic8*		 g_pMusic=NULL;
IDirectSound8*		 g_pSound=NULL;
IDirectMusicLoader8* g_pLoader=NULL;
IDirectMusicCollection8* g_pGMCollection=NULL;	// default General MIDI collection

HANDLE			g_hDSBSync=NULL;
LONGLONG		g_channelGroupBitmap=0;

// Handle Management: each object kind lives in its own private heap,
// each guarded by its own critical section.
HANDLE			g_hHandleHeap=NULL;
SDISTATLIST*	g_handleHead=NULL;
CRITICAL_SECTION	g_csHandle;

HANDLE			g_hSpeechHeap=NULL;
SDISPEECHLIST*	g_speechHead=NULL;
CRITICAL_SECTION	g_csSpeech;

HANDLE			g_hEarconHeap=NULL;
SDIEARCONLIST*	g_earconHead=NULL;
CRITICAL_SECTION	g_csEarcon;

// Preset Voice
// Eight built-in voice presets, 8 bytes each. Field order presumably
// mirrors VOICEPARAM (breathiness, gender, head size, pitch baseline,
// pitch fluctuation, roughness, speed, volume -- cf. _setvoice); confirm
// against the VOICEPARAM declaration.
const BYTE g_VoicePresetParam[8][8]=
	{	{0,0,50,65,30,0,50,92},
		{50,1,50,81,30,0,50,100},
		{0,1,22,93,35,0,50,90},
		{0,0,86,56,47,0,50,93},
		{0,0,50,69,34,0,70,92},
		{40,1,56,89,35,0,70,95},
		{40,1,45,68,30,3,50,90},
		{20,0,30,61,44,18,50,90}};

///////////////////////////////////////////////////////////////////////
// Helper Function
// Write a caller-supplied prefix plus the system text for GetLastError()
// to the CRT debug output. Debug builds only (_RPT0 compiles away in release).
void TraceError(TCHAR* psError)
{
	HLOCAL text=NULL;
	DWORD dw=GetLastError();
	FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER,
		NULL,dw,MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),(TCHAR*)&text,0,NULL);
	_RPT0(_CRT_WARN,psError);
	_RPT0(_CRT_WARN," Message: ");
	if(text)	// FormatMessage may fail and leave the buffer NULL
	{
		_RPT0(_CRT_WARN,(TCHAR*)text);
		// FORMAT_MESSAGE_ALLOCATE_BUFFER allocates with LocalAlloc;
		// the old code leaked it on every call.
		LocalFree(text);
	}
}

// Stop and rewind an A3D source and zero its entire audio buffer, so the
// next playback starts from silence.
void ResetSource(IA3dSource2* pSource)
{
	LPVOID p1=NULL,p2=NULL;
	DWORD size1=0,size2=0;
	if(pSource==NULL)
		return;
	pSource->Stop();
	pSource->Rewind();
	// Only clear the buffer if the lock actually succeeded; the old code
	// passed an unchecked (possibly NULL) pointer to ZeroMemory.
	if(SUCCEEDED(pSource->Lock(0,0, &p1,&size1,&p2,&size2, A3D_ENTIREBUFFER)) && p1)
	{
		ZeroMemory(p1,size1);
		pSource->Unlock(p1,size1,p2,size2);
	}
}

// Release the resources shared by all sound objects: the A3D source, the
// message-queue semaphore and the critical section.
// The caller must NOT hold pData->cs -- it is destroyed here.
BOOL ReleaseCommonData(COMMONDATA* pData)
{
	if(pData == NULL)
		return FALSE;
	if(pData->pSource)
	{
		pData->pSource->Stop();
		pData->pSource->FreeAudioData();
		pData->pSource->Release();
		pData->pSource=NULL;
	}
	if(pData->semaphore)	// was an unconditional CloseHandle
	{
		CloseHandle(pData->semaphore);
		pData->semaphore=NULL;	// guard against a double close
	}
	DeleteCriticalSection(&pData->cs);
	// NOTE(review): any messages still queued in pData->msgHead are not
	// freed here (the old code for that was commented out) -- confirm the
	// callers drain the queue first.
	return TRUE;
}

// Initialize the fields shared by all sound objects: unit gain, the
// creating thread's id, and the message-queue semaphore.
// Returns FALSE if pData is NULL or the semaphore cannot be created.
BOOL InitCommonData(COMMONDATA* pData)
{
	if(pData==NULL)
		return FALSE;
	pData->fGain=1.0f;
	pData->dwThreadCreate=GetCurrentThreadId();
	pData->semaphore=CreateSemaphore(NULL,0,SDI_SNDOBJECT_MAX_MESSAGE,NULL);
	if(pData->semaphore==NULL)	// creation can fail; the old code never checked
		return FALSE;
	return TRUE;
}

// Queue a message on a sound object's message list and wake its worker
// thread by releasing the object's semaphore.
// NOTE(review): pMsg->pvData is written with memcpy but never allocated
// here -- this assumes MSGLIST::pvData is an inline buffer large enough
// for lParam bytes; confirm against the MSGLIST declaration.
// NOTE(review): the list is mutated without taking pData->cs; most
// callers appear to hold it, but that contract is not enforced here.
BOOL PostSDIMessage(COMMONDATA* pData, DWORD dwMessage, LPARAM lParam, PVOID pvData)
{
	if(pData==NULL)
		return FALSE;
	MSGLIST* pMsg=new MSGLIST;
	pMsg->dwMessage=dwMessage;
	pMsg->lParam=lParam;
	if(dwMessage & 0x80000000)
	{
		// if dwMessage>=0x80000000, treat pvData as a data pointer
		// and lParam as the number of bytes pvData points to.
		memcpy(pMsg->pvData, pvData, lParam);
	}
	listAddTail(&pData->msgHead.list,&pMsg->list);
	ReleaseSemaphore(pData->semaphore,1,NULL);
	return TRUE;
}

/*************************************************************************
				SDI Environment
*************************************************************************/
// Window procedure of the hidden window that anchors the A3D / DirectSound
// initialization. No message is handled specially yet; everything is
// forwarded to the default handler.
LRESULT CALLBACK SDIWndProc(HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam)
{
	return DefWindowProc(hWnd,uMsg,wParam,lParam);
}

// Set up the whole SDI environment: a hidden window for A3D/DirectSound,
// the A3D renderer, the per-object-type private heaps and locks, and the
// DirectMusic/DirectSound objects used by earcons.
// Must be called once before any other SDI function; pair with ReleaseSDI().
//   audioDevice - optional audio device GUID (copied; may be NULL)
//   a3dStyle    - A3D style flags, stored in g_dwA3dStyle
//   hInstance   - module instance used to register the hidden window class
//   dwFlag      - currently unused
// Returns FALSE only when hInstance is NULL; all other failures are
// _ASSERTE'd in debug builds and otherwise ignored.
BOOL InitializeSDI(LPGUID audioDevice, DWORD a3dStyle, HINSTANCE hInstance, DWORD dwFlag)
{
	HRESULT		hr;
	if(hInstance == NULL)
		return FALSE;

	// Make a copy of parameters
	if(audioDevice)
	{
		g_audioDevice=new GUID;
		memcpy(g_audioDevice,audioDevice,sizeof(GUID));
	}
	g_dwA3dStyle=a3dStyle;

	// Set up A3D object. A3D requires a window handle, so a hidden window
	// is created solely for that purpose.
	WNDCLASSEX	wc;
	ZeroMemory(&wc,sizeof(WNDCLASSEX));
	wc.cbSize=sizeof(WNDCLASSEX);
	wc.hInstance=hInstance;	
	wc.lpszClassName="SUI Hiden Window Class";
	wc.lpfnWndProc=(WNDPROC)SDIWndProc;
	RegisterClassEx(&wc);
	HWND hWnd=CreateWindowEx(0,"SUI Hiden Window Class","SUI Hiden Window",
		0,0,0,0,0,NULL,NULL,hInstance,NULL);
	
	hr=A3dInitialize();
	_ASSERTE(SUCCEEDED(hr));
	hr = CoCreateInstance(CLSID_A3dApi, NULL, CLSCTX_INPROC_SERVER, 
				IID_IA3d5, (void**)&g_pA3d);
	_ASSERTE(SUCCEEDED(hr));
	hr = g_pA3d->InitEx(NULL, A3D_DISABLE_FOCUS_MUTE, A3DRENDERPREFS_DEFAULT, hWnd , A3D_CL_NORMAL);
	_ASSERTE(SUCCEEDED(hr));
	
	// Set up for Handle Management: one private heap + lock per object kind.
	g_hHandleHeap=HeapCreate(HEAP_REALLOC_IN_PLACE_ONLY, INIT_HANDLE_HEAP_SIZE, 0);
	_ASSERTE(g_hHandleHeap);
	InitializeCriticalSection(&g_csHandle);

	g_hSpeechHeap=HeapCreate(HEAP_REALLOC_IN_PLACE_ONLY, INIT_SPEECH_HEAP_SIZE, 0);
	_ASSERTE(g_hSpeechHeap);
	InitializeCriticalSection(&g_csSpeech);

	g_hEarconHeap=HeapCreate(HEAP_REALLOC_IN_PLACE_ONLY, INIT_EARCON_HEAP_SIZE, 0);
	_ASSERTE(g_hEarconHeap);
	InitializeCriticalSection(&g_csEarcon);
	
	// Initialize DMusic for MIDI support.
	// Trivial, because A3dInitialize() calls CoInitialize(NULL) internally.
	//CoInitialize(NULL);

	// DirectMusic Object
	hr=CoCreateInstance(CLSID_DirectMusic, NULL,
	   CLSCTX_INPROC, IID_IDirectMusic8,
	   (void**)&g_pMusic );
	_ASSERTE(SUCCEEDED(hr));

	// Loader
	hr=CoCreateInstance(CLSID_DirectMusicLoader, NULL, CLSCTX_INPROC, 
		IID_IDirectMusicLoader8, (void**)&g_pLoader);
	_ASSERTE(SUCCEEDED(hr));

	// Default GM Collection (the built-in General MIDI instrument set)
	DMUS_OBJECTDESC dmusdesc;
	ZeroMemory(&dmusdesc,sizeof(DMUS_OBJECTDESC));
	dmusdesc.dwSize = sizeof(DMUS_OBJECTDESC);
	dmusdesc.guidClass = CLSID_DirectMusicCollection; 
	dmusdesc.guidObject	= GUID_DefaultGMCollection;
	dmusdesc.dwValidData = DMUS_OBJ_CLASS | DMUS_OBJ_OBJECT;
	hr=g_pLoader->GetObject(&dmusdesc,IID_IDirectMusicCollection8,(void**)&g_pGMCollection);
	_ASSERTE(SUCCEEDED(hr));

	// Direct Sound
	hr=CoCreateInstance(CLSID_DirectSound8, NULL,
	   CLSCTX_INPROC, IID_IDirectSound8,
	   (void**)&g_pSound );
	_ASSERTE(SUCCEEDED(hr));
	hr=g_pSound->Initialize(NULL);
	_ASSERTE(SUCCEEDED(hr));
	hr=g_pSound->SetCooperativeLevel(hWnd, DSSCL_PRIORITY );
	_ASSERTE(SUCCEEDED(hr));
	// Connect to DirectMusic object
	hr=g_pMusic->SetDirectSound(g_pSound,NULL);
	
	return TRUE;
}

// Tear down the SDI environment: destroy every outstanding SDI object,
// release the COM objects, and free the heaps/locks created by
// InitializeSDI(). Globals are reset to NULL so that a second call (or a
// call after a failed init) is harmless -- the old code double-released.
BOOL ReleaseSDI()
{
	// Walk the list to delete all the outstanding objects
	SDISTATLIST *p1=g_handleHead, *p2;
	while(p1)
	{
		if(p1->list.pNext != NULL)
			p2=listGetStruct(p1->list.pNext, SDISTATLIST, list);
		else
			p2=NULL;
		DeleteSDIObject(p1->hSDIObject);
		p1=p2;
	}
	g_handleHead=NULL;	

	DeleteCriticalSection(&g_csHandle);
	DeleteCriticalSection(&g_csSpeech);
	DeleteCriticalSection(&g_csEarcon);

	// Clean up A3D
	delete g_audioDevice;		// delete NULL is a no-op
	g_audioDevice=NULL;
	if(g_pA3d)
	{
		g_pA3d->Release();
		g_pA3d=NULL;
	}
	//A3dUninitialize();	// Replaced by CoUninitialize below

	// Clean up DirectMusic
	if(g_pGMCollection)
	{
		g_pGMCollection->Release();
		g_pGMCollection=NULL;
	}
	if(g_pLoader)
	{
		g_pLoader->Release();
		g_pLoader=NULL;
	}
	if(g_pSound)
	{
		g_pSound->Release();
		g_pSound=NULL;
	}
	if(g_pMusic)
	{
		g_pMusic->Release();
		g_pMusic=NULL;
	}
	CoUninitialize();	// balances the CoInitialize done inside A3dInitialize()

	if(g_hDSBSync)
	{
		CloseHandle(g_hDSBSync);
		g_hDSBSync=NULL;
	}

	// Clean up Handle Management
	HeapDestroy(g_hHandleHeap);
	g_hHandleHeap=NULL;
	HeapDestroy(g_hSpeechHeap);
	g_hSpeechHeap=NULL;
	HeapDestroy(g_hEarconHeap);
	g_hEarconHeap=NULL;

	return TRUE;
}
/*****************************************************************
				Generic SDI Object Operation
*****************************************************************/
// Destroy an SDI object: stop it, tell its worker thread to quit, release
// its audio/COM resources and free its handle and object records.
// BUGFIX: the old code called LeaveCriticalSection on a critical section
// that ReleaseCommonData had already deleted and HeapFree had already
// freed -- a use-after-free. The section is now released before teardown.
BOOL	DeleteSDIObject(HSDIOBJECT hObject)
{
	if(hObject==NULL)
		return FALSE;
	SDISTATLIST* pHandle=(SDISTATLIST*)MAKEPOINTER(g_handleHead, GETHANDLESTATOFFSET(hObject));
	
	Stop(hObject);

	switch(GETHANDLETYPE(hObject))
	{
	case SDI_TYPE_SPEECH:
		{
			SDISPEECHLIST* pSpeech=(SDISPEECHLIST*)MAKEPOINTER(g_speechHead, GETHANDLEOFFSET(hObject));
			__try
			{
				EnterCriticalSection(&pSpeech->data.cs);
				PostSDIMessage(&pSpeech->data, MSG_SPEECH_QUIT, 0, NULL);
				ResetSource(pSpeech->data.pSource);
				ResetSpeech(pSpeech);
			}
			__except(EXCEPTION_EXECUTE_HANDLER)
			{
				LeaveCriticalSection(&pSpeech->data.cs);
				return FALSE;
			}
			// Leave the CS BEFORE it is deleted / its memory is freed.
			LeaveCriticalSection(&pSpeech->data.cs);
			ReleaseCommonData(&pSpeech->data);	// deletes pSpeech->data.cs
			CloseHandle(pSpeech->hSync);
			CloseHandle(pSpeech->hSyncEnd);
			HeapFree(g_hHandleHeap,0,pHandle);
			HeapFree(g_hSpeechHeap,0,pSpeech);
		}
		break;
	case SDI_TYPE_EARCON:
		{
			SDIEARCONLIST* pEarcon=(SDIEARCONLIST*)MAKEPOINTER(g_earconHead, GETHANDLEOFFSET(hObject));
			__try
			{
				EnterCriticalSection(&pEarcon->data.cs);
				PostSDIMessage(&pEarcon->data, MSG_EARCON_QUIT, 0, NULL);
				ResetSource(pEarcon->data.pSource);
				ResetEarcon(pEarcon);
			}
			__except(EXCEPTION_EXECUTE_HANDLER)
			{
				LeaveCriticalSection(&pEarcon->data.cs);
				return FALSE;
			}
			// Leave the CS BEFORE it is deleted / its memory is freed.
			LeaveCriticalSection(&pEarcon->data.cs);
			ReleaseCommonData(&pEarcon->data);	// deletes pEarcon->data.cs
			// Unload every instrument this earcon downloaded to its port.
			for(int i=0; i<SDI_EARCON_NUM_CHANNEL; i++)
			{
				if(pEarcon->pDownloadedInstruments[i])
				{
					HRESULT hr;
					hr=pEarcon->pPort->UnloadInstrument(pEarcon->pDownloadedInstruments[i]);
					_ASSERTE(SUCCEEDED(hr));
					hr=pEarcon->pDownloadedInstruments[i]->Release();
					_ASSERTE(SUCCEEDED(hr));
				}
			}
			if(pEarcon->pMusicBuffer)	// was an unconditional Release
				pEarcon->pMusicBuffer->Release();
			if(pEarcon->pSoundBuffer)
				pEarcon->pSoundBuffer->Release();
			if(pEarcon->pClock)
				pEarcon->pClock->Release();
			if(pEarcon->pPort)
				pEarcon->pPort->Release();
			HeapFree(g_hHandleHeap,0,pHandle);
			HeapFree(g_hEarconHeap,0,pEarcon);
		}
		break;
	default:
		break;
	}
	return TRUE;
}

// Start playback of an SDI object by posting the appropriate message to
// its worker thread. Returns FALSE if the object is already playing.
BOOL	Play(HSDIOBJECT hObject)
{
	if(hObject==NULL)
		return FALSE;
	SDISTATLIST* pHandle=(SDISTATLIST*)MAKEPOINTER(g_handleHead, GETHANDLESTATOFFSET(hObject));
	
	switch(GETHANDLETYPE(hObject))
	{
	case SDI_TYPE_SPEECH:
		{
			SDISPEECHLIST* pSpeech=(SDISPEECHLIST*)MAKEPOINTER(g_speechHead, GETHANDLEOFFSET(hObject));
			if(pSpeech->data.dwStatus & STATUS_SPEECH_PLAYING)
				return FALSE;
			__try
			{
				EnterCriticalSection(&pSpeech->data.cs);
				PostSDIMessage(&pSpeech->data, MSG_SPEECH_SYNTHESIZE, 0, NULL);
			}
			__except(EXCEPTION_EXECUTE_HANDLER)
			{
				LeaveCriticalSection(&pSpeech->data.cs);
				return FALSE;
			}
			LeaveCriticalSection(&pSpeech->data.cs);
		}
		break;	// (a duplicated, unreachable "break;" was removed here)
	case SDI_TYPE_EARCON:
		{
			SDIEARCONLIST* pEarcon=(SDIEARCONLIST*)MAKEPOINTER(g_earconHead, GETHANDLEOFFSET(hObject));
			if(pEarcon->data.dwStatus & STATUS_EARCON_PLAYING)
				return FALSE;
			PostSDIMessage(&pEarcon->data, MSG_EARCON_PLAY, 0, NULL);
		}
		break;
	default:
		break;
	}
	return TRUE;
}

// Stop an SDI object. For speech: drops the whole pending-sentence queue,
// stops the A3D source and signals hSyncEnd so eciSynchronize returns.
// For earcons: posts a stop message and rewinds.
BOOL	Stop(HSDIOBJECT hObject)
{
	if(hObject==NULL)
		return FALSE;
	SDISTATLIST* pHandle=(SDISTATLIST*)MAKEPOINTER(g_handleHead, GETHANDLESTATOFFSET(hObject));
	
	switch(GETHANDLETYPE(hObject))
	{
	case SDI_TYPE_SPEECH:
		{
			SDISPEECHLIST* pSpeech=(SDISPEECHLIST*)MAKEPOINTER(g_speechHead, GETHANDLEOFFSET(hObject));
			SPEECHSENTENCE* p1=pSpeech->pSentenceHead;
			SPEECHSENTENCE* p2;
			
			__try
			{
				EnterCriticalSection(&pSpeech->data.cs);
				// Empty the sentence list. It may already be empty:
				// the old code dereferenced p1 without a NULL check.
				while(p1 && p1->list.pNext)
				{
					p2 = listGetStruct(p1->list.pNext, SPEECHSENTENCE, list);
					delete p1;
					p1=p2;
				}
				delete p1;	// delete NULL is a no-op
				p1=NULL;
				pSpeech->pSentenceHead=NULL;
				
				pSpeech->data.pSource->Stop();
				// Tell eciSynchronize to return immediately
				SetEvent(pSpeech->hSyncEnd);
			}
			__except(EXCEPTION_EXECUTE_HANDLER)
			{
				LeaveCriticalSection(&pSpeech->data.cs);
				return FALSE;
			}
			LeaveCriticalSection(&pSpeech->data.cs);
		}
		break;
	case SDI_TYPE_EARCON:
		{
			SDIEARCONLIST* pEarcon=(SDIEARCONLIST*)MAKEPOINTER(g_earconHead, GETHANDLEOFFSET(hObject));
			
			EnterCriticalSection(&pEarcon->data.cs);
			PostSDIMessage(&pEarcon->data, MSG_EARCON_STOP, 0, NULL);
			
			RewindEarcon(pEarcon);
			LeaveCriticalSection(&pEarcon->data.cs);
		}
		break;
	default:
		break;
	}
	return TRUE;
}

// Pause an SDI object. For speech: marks it paused, stops the source and
// signals hSyncEnd (the pending sentence list is kept, unlike Stop()).
// For earcons: posts a stop message.
BOOL	Pause(HSDIOBJECT hObject)
{
	if(hObject==NULL)
		return FALSE;
	SDISTATLIST* pHandle=(SDISTATLIST*)MAKEPOINTER(g_handleHead, GETHANDLESTATOFFSET(hObject));
	
	switch(GETHANDLETYPE(hObject))
	{
	case SDI_TYPE_SPEECH:
		{
			// (unused sentence-list locals p1/p2 removed)
			SDISPEECHLIST* pSpeech=(SDISPEECHLIST*)MAKEPOINTER(g_speechHead, GETHANDLEOFFSET(hObject));
			
			__try
			{
				EnterCriticalSection(&pSpeech->data.cs);
				
				pSpeech->data.dwStatus |= STATUS_SPEECH_PAUSED;

				pSpeech->data.pSource->Stop();
				// Tell eciSynchronize to return immediately
				SetEvent(pSpeech->hSyncEnd);
			}
			__except(EXCEPTION_EXECUTE_HANDLER)
			{
				LeaveCriticalSection(&pSpeech->data.cs);
				return FALSE;
			}
			LeaveCriticalSection(&pSpeech->data.cs);
		}
		break;
	case SDI_TYPE_EARCON:
		{
			SDIEARCONLIST* pEarcon=(SDIEARCONLIST*)MAKEPOINTER(g_earconHead, GETHANDLEOFFSET(hObject));
			EnterCriticalSection(&pEarcon->data.cs);
			PostSDIMessage(&pEarcon->data, MSG_EARCON_STOP, 0, NULL);
			LeaveCriticalSection(&pEarcon->data.cs);
		}
		break;
	default:
		break;
	}
	return TRUE;
}

// Position an SDI object in 3D space (SDI is left-handed; z is negated
// for A3D/DirectSound, which are right-handed).
// BUGFIX: the old code returned FALSE from inside the __try block while
// still holding the critical section, deadlocking later callers.
BOOL	SetPosition(HSDIOBJECT hObject, SDIVECTOR* pos)
{
	if(hObject==NULL || pos==NULL)
		return FALSE;
	SDISTATLIST* pHandle=(SDISTATLIST*)MAKEPOINTER(g_handleHead, GETHANDLESTATOFFSET(hObject));
	
	switch(GETHANDLETYPE(hObject))
	{
	case SDI_TYPE_SPEECH:
		{
			SDISPEECHLIST* pSpeech=(SDISPEECHLIST*)MAKEPOINTER(g_speechHead, GETHANDLEOFFSET(hObject));
			BOOL bOK=TRUE;
			__try
			{
				EnterCriticalSection(&pSpeech->data.cs);
				// Attention!! A3D uses a right-handed coordinate system!!
				HRESULT hr=pSpeech->data.pSource->SetPosition3f(pos->x, pos->y, -pos->z);
				hr=g_pA3d->Flush();
				if(FAILED(hr))
					bOK=FALSE;	// defer the FALSE return until the CS is released
			}
			__except(EXCEPTION_EXECUTE_HANDLER)
			{
				LeaveCriticalSection(&pSpeech->data.cs);
				return FALSE;
			}
			LeaveCriticalSection(&pSpeech->data.cs);
			if(!bOK)
				return FALSE;
		}
		break;
	case SDI_TYPE_EARCON:
		{
			SDIEARCONLIST* pEarcon=(SDIEARCONLIST*)MAKEPOINTER(g_earconHead, GETHANDLEOFFSET(hObject));
			
			// Further refinement needed for positioning the sound in 3D.
			pEarcon->p3DBuffer->SetPosition(pos->x, pos->y, -pos->z, DS3D_IMMEDIATE);
		}
		break;
	default:
		break;
	}
	return TRUE;
}

// Retrieve an SDI object's 3D position into *pos.
// NOTE(review): unlike SetPosition, the speech path does not negate z on
// the way back out -- confirm whether the caller expects A3D coordinates.
// BUGFIX: the old code returned FALSE from inside the __try block while
// still holding the critical section.
BOOL	GetPosition(HSDIOBJECT hObject, SDIVECTOR* pos)
{
	if(hObject==NULL)
		return FALSE;
	
	SDISTATLIST* pHandle=(SDISTATLIST*)MAKEPOINTER(g_handleHead, GETHANDLESTATOFFSET(hObject));
	
	switch(GETHANDLETYPE(hObject))
	{
	case SDI_TYPE_SPEECH:
		{
			SDISPEECHLIST* pSpeech=(SDISPEECHLIST*)MAKEPOINTER(g_speechHead, GETHANDLEOFFSET(hObject));
			BOOL bOK=TRUE;
			__try
			{
				EnterCriticalSection(&pSpeech->data.cs);
				HRESULT hr=pSpeech->data.pSource->GetPosition3f(&pos->x, &pos->y, &pos->z);
				if(FAILED(hr))
					bOK=FALSE;	// defer the FALSE return until the CS is released
			}
			__except(EXCEPTION_EXECUTE_HANDLER)
			{
				LeaveCriticalSection(&pSpeech->data.cs);
				return FALSE;
			}
			LeaveCriticalSection(&pSpeech->data.cs);
			if(!bOK)
				return FALSE;
		}
		break;
	case SDI_TYPE_EARCON:
		{
			SDIEARCONLIST* pEarcon=(SDIEARCONLIST*)MAKEPOINTER(g_earconHead, GETHANDLEOFFSET(hObject));
			
			pEarcon->p3DBuffer->GetPosition((D3DVECTOR*)pos);
		}
		break;
	default:
		break;
	}
	return TRUE;
}

// Set an SDI object's gain (0.0 .. 1.0). Speech uses A3D's linear gain;
// earcons map the gain to DirectSound's 1/100-dB attenuation scale.
// BUGFIX: the old code returned FALSE from inside the __try block while
// still holding the critical section.
BOOL	SetVolume(HSDIOBJECT hObject, FLOAT fGain)
{
	if(hObject==NULL)
		return FALSE;
	if(fGain<0 || fGain>1.0f)
		return FALSE;
	SDISTATLIST* pHandle=(SDISTATLIST*)MAKEPOINTER(g_handleHead, GETHANDLESTATOFFSET(hObject));
	
	switch(GETHANDLETYPE(hObject))
	{
	case SDI_TYPE_SPEECH:
		{
			SDISPEECHLIST* pSpeech=(SDISPEECHLIST*)MAKEPOINTER(g_speechHead, GETHANDLEOFFSET(hObject));
			BOOL bOK=TRUE;
			__try
			{
				EnterCriticalSection(&pSpeech->data.cs);
				HRESULT hr=pSpeech->data.pSource->SetGain(fGain);
				if(SUCCEEDED(hr))	// original skipped the Flush on SetGain failure
					hr=g_pA3d->Flush();
				if(FAILED(hr))
					bOK=FALSE;	// defer the FALSE return until the CS is released
			}
			__except(EXCEPTION_EXECUTE_HANDLER)
			{
				LeaveCriticalSection(&pSpeech->data.cs);
				return FALSE;
			}
			LeaveCriticalSection(&pSpeech->data.cs);
			if(!bOK)
				return FALSE;
		}
		break;
	case SDI_TYPE_EARCON:
		{
			SDIEARCONLIST* pEarcon=(SDIEARCONLIST*)MAKEPOINTER(g_earconHead, GETHANDLEOFFSET(hObject));
			// Map linear gain to DirectSound 1/100 dB (the +0.01 keeps
			// log10 finite at fGain == 0).
			pEarcon->pSoundBuffer->SetVolume((LONG)(2000*log10((double)fGain+0.01)));
		}
		break;
	default:
		break;
	}
	return TRUE;
}

// Return the current gain of an SDI object; 0 on any failure.
// BUGFIX: the old code returned from inside the __try block while still
// holding the critical section.
FLOAT	GetVolume(HSDIOBJECT hObject)
{
	if(hObject==NULL)
		return 0;
	SDISTATLIST* pHandle=(SDISTATLIST*)MAKEPOINTER(g_handleHead, GETHANDLESTATOFFSET(hObject));
	
	FLOAT fGain=0;
	switch(GETHANDLETYPE(hObject))
	{
	case SDI_TYPE_SPEECH:
		{
			SDISPEECHLIST* pSpeech=(SDISPEECHLIST*)MAKEPOINTER(g_speechHead, GETHANDLEOFFSET(hObject));
			__try
			{
				EnterCriticalSection(&pSpeech->data.cs);
				HRESULT hr=pSpeech->data.pSource->GetGain(&fGain);
				if(FAILED(hr))
					fGain=0;	// fall through; the CS is released below
			}
			__except(EXCEPTION_EXECUTE_HANDLER)
			{
				LeaveCriticalSection(&pSpeech->data.cs);
				return 0;
			}
			LeaveCriticalSection(&pSpeech->data.cs);
		}
		break;
	case SDI_TYPE_EARCON:
		{
			SDIEARCONLIST* pEarcon=(SDIEARCONLIST*)MAKEPOINTER(g_earconHead, GETHANDLEOFFSET(hObject));
			LONG l;
			pEarcon->pSoundBuffer->GetVolume(&l);
			// NOTE(review): this linear back-mapping is not the inverse of
			// the log10 scale used in SetVolume -- confirm intended units.
			fGain=-(FLOAT)l/10000;
		}
		break;
	default:
		break;
	}
	return fGain;
}

/*****************************************************************
				SDI Object: Speech Object
							(HSPEECH)
*****************************************************************/
// Copy one of the 8 built-in voice presets into *pVoice.
// Returns FALSE on a NULL destination or an index outside 0..7.
BOOL GetPresetVoice(int nIndex,	VOICEPARAM* pVoice)
{
	if(pVoice==NULL || nIndex<0 || nIndex>7)
		return FALSE;
	// sizeof the table row (8 bytes) replaces the old magic constant 8.
	memcpy(pVoice,g_VoicePresetParam[nIndex],sizeof(g_VoicePresetParam[nIndex]));
	return TRUE;
}

// Clear a speech object's double-buffering and playing status bits and
// put both of its synchronization events back into the non-signaled state.
BOOL ResetSpeech(SDISPEECHLIST* pSpeech)
{
	if(pSpeech == NULL)
		return FALSE;
	// Drop both status flags in a single masked store.
	pSpeech->data.dwStatus &= ~(STATUS_SPEECH_FILL2NDHALF | STATUS_SPEECH_PLAYING);
	ResetEvent(pSpeech->hSync);
	ResetEvent(pSpeech->hSyncEnd);
	return TRUE;
}

// Push every field of *pVoice into the ECI engine.
// Preserves the original contract: TRUE only when the sum of all
// eciSetVoiceParam return values is zero.
BOOL _setvoice(ECIHand hECIHand,VOICEPARAM* pVoice)
{
	int nSum=0;
	nSum+=eciSetVoiceParam(hECIHand,0,eciBreathiness,pVoice->breathiness);
	nSum+=eciSetVoiceParam(hECIHand,0,eciGender,pVoice->gender);
	nSum+=eciSetVoiceParam(hECIHand,0,eciHeadSize,pVoice->headSize);
	nSum+=eciSetVoiceParam(hECIHand,0,eciPitchBaseline,pVoice->pitchBaseline);
	nSum+=eciSetVoiceParam(hECIHand,0,eciPitchFluctuation,pVoice->pitchFluctuation);
	nSum+=eciSetVoiceParam(hECIHand,0,eciRoughness,pVoice->roughness);
	nSum+=eciSetVoiceParam(hECIHand,0,eciSpeed,pVoice->speed);
	nSum+=eciSetVoiceParam(hECIHand,0,eciVolume,pVoice->volume);
	return nSum==0 ? TRUE : FALSE;
}

// ECI synthesis callback, invoked repeatedly from eciSynchronize() (run on
// a SpeechSynthThread work item) whenever the engine fills the PCM output
// buffer registered in SDISpeechThread. Implements double buffering:
// alternate calls write the 1st / 2nd half of the A3D source buffer, then
// block on the play-cursor events (hSync) or the stop event (hSyncEnd)
// that were set up in CreateSpeech().
//   lparam - number of 16-bit samples delivered in pSpeech->buffer
// Returns eciDataProcessed to request more data, eciDataAbort to stop.
ECICallbackReturn SpeechCallback(ECIHand heci,ECIMessage msg,long lparam,void* pdata)
{
	SDISPEECHLIST* pSpeech=(SDISPEECHLIST*)pdata;

	TraceError("SpeechCallbackFunc called.");
	short* buffer=pSpeech->buffer;
	switch(msg)
	{
	case eciPhonemeBuffer:
		break;
	case eciWaveformBuffer:
		{
			// Write the buffer.
			LPVOID p1,p2;
			DWORD size1,size2,dw;
			
			if(pSpeech->data.dwStatus & STATUS_SPEECH_FILL2NDHALF)
			{
				// Write the 2nd half of the A3D source buffer
				pSpeech->data.pSource->Lock(SDI_SPEECH_BUFFERSIZE*sizeof(WORD),
					SDI_SPEECH_BUFFERSIZE*sizeof(WORD), &p1,&size1,&p2,&size2,0);
				_RPT2(_CRT_WARN,"2nd half.size1=%u,size2=%u\n",size1,size2);
				memset(p1,0,size1);	// pad with silence past the valid samples
				memcpy(p1,pSpeech->buffer,lparam*sizeof(WORD));
				pSpeech->data.pSource->Unlock(p1,size1,p2,size2);
				
				pSpeech->data.dwStatus &= ~STATUS_SPEECH_FILL2NDHALF;
			}
			else
			{
				// Write the 1st half of the A3D source buffer
				pSpeech->data.pSource->Lock(0,SDI_SPEECH_BUFFERSIZE*sizeof(WORD),
					&p1,&size1,&p2,&size2,0);
				_RPT2(_CRT_WARN,"1st half.size1=%u,size2=%u\n",size1,size2);
				memset(p1,0,size1);
				memcpy(p1,pSpeech->buffer,lparam*sizeof(WORD));
				pSpeech->data.pSource->Unlock(p1,size1,p2,size2);
				
				pSpeech->data.dwStatus |= STATUS_SPEECH_FILL2NDHALF;
			}
			if(!(pSpeech->data.dwStatus & STATUS_SPEECH_PLAYING))
			{
				// First block: start looping playback of the source.
				pSpeech->data.dwStatus |= STATUS_SPEECH_PLAYING;
				pSpeech->data.pSource->Play(A3D_LOOPED);
				g_pA3d->Flush();
			}
			// Wait for the play cursor to trigger the event (hSync) or for
			// the user to stop the object (hSyncEnd).
			HANDLE pHandles[2]={pSpeech->hSync, pSpeech->hSyncEnd};
			dw=WaitForMultipleObjects(2, pHandles, FALSE, INFINITE);
			if(lparam < SDI_SPEECH_BUFFERSIZE)
			{
				// A short block means this was the last chunk of the
				// utterance, so silence the other half of the buffer...
				if(pSpeech->data.dwStatus & STATUS_SPEECH_FILL2NDHALF)
				{
					pSpeech->data.pSource->Lock(SDI_SPEECH_BUFFERSIZE*sizeof(WORD),
						SDI_SPEECH_BUFFERSIZE*sizeof(WORD), &p1,&size1,&p2,&size2,0);
					memset(p1,0,size1);
					pSpeech->data.pSource->Unlock(p1,size1,p2,size2);
					
					pSpeech->data.dwStatus &= ~STATUS_SPEECH_FILL2NDHALF;
				}
				else
				{
					pSpeech->data.pSource->Lock(0,SDI_SPEECH_BUFFERSIZE*sizeof(WORD),
						&p1,&size1,&p2,&size2,0);
					memset(p1,0,size1);
					pSpeech->data.pSource->Unlock(p1,size1,p2,size2);
					
					pSpeech->data.dwStatus |= STATUS_SPEECH_FILL2NDHALF;
				}
				// ...wait for that last half to finish playing, then stop.
				WaitForMultipleObjects(2, pHandles, FALSE, INFINITE);
				pSpeech->data.pSource->Stop();
				return eciDataAbort;
			}
			if(dw == WAIT_OBJECT_0)
			{
				return eciDataProcessed;	// cursor event: keep synthesizing
			}
			else if(dw == WAIT_OBJECT_0+1)
			{
				return eciDataAbort;	// hSyncEnd: stop synthesizing
			}
			else
				return eciDataNotProcessed;	// wait failed
		}
		break;
	default:
		break;
	}
	return eciDataProcessed;
}

// Work item that drives one synthesis run: eciSynchronize() blocks while
// repeatedly invoking SpeechCallback, then the source and speech state are
// reset and the owning thread is told the sentence is complete.
DWORD WINAPI SpeechSynthThread(PVOID pvContext)
{
	SDISPEECHLIST* pSpeech=(SDISPEECHLIST*)pvContext;

	if(!eciSynchronize(pSpeech->hECI))	// pumps SpeechCallback until done/aborted
	{
		// Fetch the engine's error text (useful under a debugger).
		char szError[100];
		eciErrorMessage(pSpeech->hECI, szError);
	}
	ResetSource(pSpeech->data.pSource);
	ResetSpeech(pSpeech);
	PostSDIMessage(&pSpeech->data, MSG_SPEECH_SENTENCECOMPLETE, 0, NULL);
	return 0;
}

// Per-speech-object worker thread. Owns the object's ECI engine: creates
// it, services the message queue (synthesize / sentence-complete / quit)
// and tears the engine down on exit.
// BUGFIXES: "return NULL" from a DWORD function; hECI leaked on buffer
// allocation failure; "goto NEXT_MSG" jumped past "delete pMsg", leaking
// every MSG_SPEECH_SYNTHESIZE message that found an empty sentence list.
DWORD WINAPI SDISpeechThread(PVOID pvContext)
{
	HSPEECH hSpeech=(HSPEECH)pvContext;
	SDISTATLIST* pHandle=(SDISTATLIST*)MAKEPOINTER(g_handleHead, GETHANDLESTATOFFSET(hSpeech));

	SDISPEECHLIST* pSpeech=(SDISPEECHLIST*)MAKEPOINTER(g_speechHead, GETHANDLEOFFSET(hSpeech));
	
	// Set up ECI
	pSpeech->hECI=eciNew();
	_ASSERTE(pSpeech->hECI);
	SetECIStyle(hSpeech, pSpeech->dwECIStyle);
	SetVoice(hSpeech, pSpeech->pVoice);
	
	pSpeech->buffer=new short[SDI_SPEECH_BUFFERSIZE];
	if(pSpeech->buffer==NULL)
	{
		eciDelete(pSpeech->hECI);	// do not leak the engine on failure
		SetLastError(ERROR_NOT_ENOUGH_MEMORY);
		return 0;
	}
	eciRegisterCallback(pSpeech->hECI,SpeechCallback,pSpeech);
	// Synthesis output goes straight into our buffer (16-bit samples).
	eciSetOutputBuffer(pSpeech->hECI,SDI_SPEECH_BUFFERSIZE/*16-bit sample*/
		,pSpeech->buffer);

	HANDLE handle[1]={pSpeech->data.semaphore};
	DWORD dw;
	BOOL br;
	BOOL bQuit=FALSE;
	while(!bQuit)
	{
		dw=WaitForMultipleObjects(1,handle,FALSE,INFINITE);
		EnterCriticalSection(&pSpeech->data.cs);
		if(dw == WAIT_OBJECT_0)
		{
			// Pop the oldest message off the queue.
			LISTHEADER* plist=pSpeech->data.msgHead.list.pNext;
			MSGLIST* pMsg=listGetStruct(plist, MSGLIST, list);
			listRemoveItem(plist);
			switch(pMsg->dwMessage)
			{
			case MSG_SPEECH_SYNTHESIZE:
				if(pSpeech->pSentenceHead != NULL)	// nothing queued: ignore
				{
					br=eciAddText(pSpeech->hECI, pSpeech->pSentenceHead->psText);
					if(!br)
					{
						char buffer[100];
						eciErrorMessage(pSpeech->hECI, buffer);
					}
					br=eciSynthesize(pSpeech->hECI);
					if(!br)
					{
						char buffer[100];
						eciErrorMessage(pSpeech->hECI, buffer);
					}
					pSpeech->data.dwStatus &= ~STATUS_SPEECH_PAUSED;
					QueueUserWorkItem(SpeechSynthThread, 
						pSpeech, WT_EXECUTELONGFUNCTION);
				}
				break;
			case MSG_SPEECH_QUIT:
				bQuit=TRUE;
				break;
			case MSG_SPEECH_SENTENCECOMPLETE:
				{
					if(pSpeech->pSentenceHead == NULL)
						break;
					if(pSpeech->pSentenceHead->list.pNext == NULL)
					{
						// That was the last sentence.
						delete pSpeech->pSentenceHead;
						pSpeech->pSentenceHead=NULL;
						break;
					}
					
					// Advance to the next queued sentence.
					SPEECHSENTENCE* pNext=listGetStruct(pSpeech->pSentenceHead->list.pNext, SPEECHSENTENCE, list);
					if(pSpeech->data.dwStatus & STATUS_SPEECH_PAUSED)
					{
						// Paused: advance the queue but do not synthesize.
						delete pSpeech->pSentenceHead;
						pSpeech->pSentenceHead=pNext;
						break;
					}
					BOOL br2=eciAddText(pSpeech->hECI, pNext->psText);
					_ASSERTE(br2);
					br2=eciSynthesize(pSpeech->hECI);
					_ASSERTE(br2);
					QueueUserWorkItem(SpeechSynthThread, pSpeech, 0);
					// Remove the finished sentence from the list.
					delete pSpeech->pSentenceHead;
					pSpeech->pSentenceHead=pNext;
				}
				break;
			default:
				break;
			}
			delete pMsg;	// always free the dequeued message
		}
		LeaveCriticalSection(&pSpeech->data.cs);
	}

	// Clean up ECI
	delete[] pSpeech->buffer;
	eciDelete(pSpeech->hECI);
	return 0;
}

// Create a speech object: allocate its handle/object records, build the
// A3D source used for playback, store the caller's ECI parameters and
// start the object's worker thread. Returns the encoded handle, or NULL.
// BUGFIX: pSpeech->dwECIStyle used to be read (for the sample-rate
// selection) before it was assigned, and pSpeech->pVoice was left
// uninitialized when a real VOICEPARAM pointer was passed.
HSPEECH CreateSpeech(DWORD dwECIStyle, VOICEPARAM* pVoice, DWORD dwFlag)
{
	HSPEECH hSpeech=MAKEHANDLETYPE(SDI_TYPE_SPEECH);	// 1: object-type bits
	HRESULT hr;

	SDISTATLIST* pHandle=HEAPALLOC_STRUCT(g_hHandleHeap, SDISTATLIST);
	_ASSERTE(pHandle);
	if(g_handleHead != NULL)
	{
		hSpeech |= MAKEHANDLESTATOFFSET(GETOFFSET(pHandle, g_handleHead));	// 2: stat-record offset
		if(GETOFFSET(pHandle, g_handleHead) > SDI_HANDLE_MAX_STATOFFSET)
			return NULL;
	}

	SDISPEECHLIST* pSpeech=HEAPALLOC_STRUCT(g_hSpeechHeap, SDISPEECHLIST);
	_ASSERTE(pSpeech);
	if(g_speechHead == NULL)
		g_speechHead=pSpeech;
	if(GETOFFSET(pSpeech, g_speechHead) > SDI_HANDLE_MAX_OFFSET)
		return NULL;
	// Complete the handle
	hSpeech |= MAKEHANDLEOFFSET(GETOFFSET(pSpeech, g_speechHead));		// 3: object offset

	// Copy the ECI parameters BEFORE they are used below.
	pSpeech->dwECIStyle=dwECIStyle;
	// pVoice is either a preset index smuggled in a pointer or a real
	// VOICEPARAM*; SetVoice() (called from the worker thread) decides.
	pSpeech->pVoice=pVoice;

	// Initialize speech object
	pSpeech->hSync=CreateEvent(NULL,FALSE,FALSE,NULL);
	pSpeech->hSyncEnd=CreateEvent(NULL,FALSE,FALSE,NULL);
	InitCommonData(&pSpeech->data);
	
	// Set up A3D source for "Displaying"
	hr=g_pA3d->NewSource(A3DSOURCE_TYPEDEFAULT,&(pSpeech->data.pSource));
	_ASSERTE(SUCCEEDED(hr));

	WAVEFORMATEX wfx;
	ZeroMemory(&wfx,sizeof(WAVEFORMATEX));
	wfx.cbSize=sizeof(WAVEFORMATEX);
	// NOTE(review): WAVE_FORMAT_EXTENSIBLE normally requires a full
	// WAVEFORMATEXTENSIBLE structure -- confirm A3D accepts this form.
	wfx.wFormatTag=WAVE_FORMAT_EXTENSIBLE;
	wfx.wBitsPerSample=16;
	wfx.nBlockAlign=2;
	if(dwECIStyle & SDI_SAMPLERATE_11024)
		wfx.nSamplesPerSec=11024;
	else if(dwECIStyle & SDI_SAMPLERATE_22048)
		wfx.nSamplesPerSec=22048;
	else
		wfx.nSamplesPerSec=8000;
	wfx.nChannels=1;
	wfx.nAvgBytesPerSec=wfx.nSamplesPerSec*wfx.nBlockAlign;
	hr=pSpeech->data.pSource->SetAudioFormat(&wfx);
	_ASSERTE(SUCCEEDED(hr));

	// Double buffering: play events fire as the cursor crosses each half.
	hr=pSpeech->data.pSource->AllocateAudioData(SDI_SPEECH_BUFFERSIZE*sizeof(WORD)*2);
	_ASSERTE(SUCCEEDED(hr));
	ResetSource(pSpeech->data.pSource);
	hr=pSpeech->data.pSource->SetPlayEvent(SDI_SPEECH_INTERVAL, pSpeech->hSync);
	_ASSERTE(SUCCEEDED(hr));
	hr=pSpeech->data.pSource->SetPlayEvent(
		SDI_SPEECH_BUFFERSIZE*sizeof(WORD)+SDI_SPEECH_INTERVAL, pSpeech->hSync);
	_ASSERTE(SUCCEEDED(hr));
	hr=pSpeech->data.pSource->SetPlayEvent(A3DSOURCE_WAVEEVENT_STOP, pSpeech->hSyncEnd);
	_ASSERTE(SUCCEEDED(hr));

	InitializeCriticalSection(&pSpeech->data.cs);

	// Add the speech object to the statistical list
	pHandle->hSDIObject=hSpeech;
	pHandle->dwThreadCreate=GetCurrentThreadId();
	if(g_handleHead == NULL)
		g_handleHead=pHandle;
	else
		listAddTail(&g_handleHead->list, &pHandle->list);

	// Start the SDISpeechThread.
	QueueUserWorkItem(SDISpeechThread, (PVOID)hSpeech, WT_EXECUTELONGFUNCTION);

	return hSpeech;
}

// Replace whatever the speech object is currently saying with a single
// new sentence and kick off its synthesis.
BOOL	PlayText(HSPEECH hSpeech, PCTSTR psText)
{
	if(hSpeech==NULL || psText==NULL)
		return FALSE;
	SDISTATLIST* pHandle=(SDISTATLIST*)MAKEPOINTER(g_handleHead, GETHANDLESTATOFFSET(hSpeech));

	// Abort the current utterance and give the worker a moment to wind down.
	Stop(hSpeech);
	Sleep(100);

	SDISPEECHLIST* pSpeech=(SDISPEECHLIST*)MAKEPOINTER(g_speechHead, GETHANDLEOFFSET(hSpeech));
	SPEECHSENTENCE* pNewSentence=new SPEECHSENTENCE;
	if(pNewSentence == NULL)
		return FALSE;
	listInitHead(&pNewSentence->list);
	pNewSentence->psText=_tcsdup(psText);	// owned copy; freed with the sentence

	__try
	{
		EnterCriticalSection(&pSpeech->data.cs);
		// The queue is empty after Stop(); this sentence becomes the head.
		pSpeech->pSentenceHead=pNewSentence;
		PostSDIMessage(&pSpeech->data, MSG_SPEECH_SYNTHESIZE,0,NULL);
	}
	__except(EXCEPTION_EXECUTE_HANDLER)
	{
		LeaveCriticalSection(&pSpeech->data.cs);
		return FALSE;
	}
	LeaveCriticalSection(&pSpeech->data.cs);
	return TRUE;
}

// Append a sentence to a speech object's pending queue without stopping
// current playback.
// BUGFIX: when the queue was empty, the old code dereferenced the NULL
// pSentenceHead inside listAddTail; an empty queue now simply adopts the
// new sentence as its head.
BOOL	AddText(HSPEECH hSpeech, PCTSTR psText)
{
	if(hSpeech==NULL || psText==NULL)
		return FALSE;
	SDISTATLIST* pHandle=(SDISTATLIST*)MAKEPOINTER(g_handleHead, GETHANDLESTATOFFSET(hSpeech));

	SDISPEECHLIST* pSpeech=(SDISPEECHLIST*)MAKEPOINTER(g_speechHead, GETHANDLEOFFSET(hSpeech));
	SPEECHSENTENCE* pSentence=new SPEECHSENTENCE;
	if(pSentence == NULL)
		return FALSE;
	listInitHead(&pSentence->list);
	pSentence->psText=_tcsdup(psText);	// owned copy; freed with the sentence
	
	__try
	{
		EnterCriticalSection(&pSpeech->data.cs);
		if(pSpeech->pSentenceHead == NULL)
			pSpeech->pSentenceHead=pSentence;	// empty queue: become the head
		else
			listAddTail(&pSpeech->pSentenceHead->list,&pSentence->list);
	}
	__except(EXCEPTION_EXECUTE_HANDLER)
	{
		LeaveCriticalSection(&pSpeech->data.cs);
		return FALSE;
	}
	LeaveCriticalSection(&pSpeech->data.cs);
	return TRUE;
}

// Apply a voice to a speech object. pVoice is dual-purpose: a small value
// is treated as a preset index into g_VoicePresetParam, anything larger
// as a real VOICEPARAM pointer.
// NOTE(review): the preset threshold here is 4048, but CreateSpeech uses
// 16 for the same test -- confirm which boundary is intended.
BOOL	SetVoice(HSPEECH hSpeech, VOICEPARAM* pVoice)
{
	if(hSpeech==NULL || pVoice==NULL)
		return FALSE;
	SDISPEECHLIST* pSpeech=(SDISPEECHLIST*)MAKEPOINTER(g_speechHead, GETHANDLEOFFSET(hSpeech));
	if(pVoice < (VOICEPARAM*)4048)
		GetPresetVoice((INT)pVoice,&pSpeech->voice);	// pointer value doubles as preset index
	else
		pSpeech->voice=*pVoice;
	
	_setvoice(pSpeech->hECI,&pSpeech->voice);
	return TRUE;
}

// Configure the ECI engine from the packed style DWORD:
//   bits 0-3 language, bit 4 dictionary, bit 5 input type,
//   bit 6 number mode, bits 7-8 sample rate.
// NOTE(review): the masked flag values (e.g. dwECIStyle & 0x10 yields 16,
// not 1) are passed to eciSetParam unshifted -- confirm ECI accepts raw
// flag values rather than normalized 0/1 here.
BOOL	SetECIStyle(HSPEECH hSpeech, DWORD dwECIStyle)
{
	if(hSpeech==NULL)
		return FALSE;
	SDISPEECHLIST* pSpeech=(SDISPEECHLIST*)MAKEPOINTER(g_speechHead, GETHANDLEOFFSET(hSpeech));
	pSpeech->dwECIStyle=dwECIStyle;
	ECILanguageDialect Lang;
	switch(dwECIStyle & 0x0000000F)
	{
	case SDI_LANGUAGE_GENERAL_AMERICAN_ENGLISH:
		Lang=eciGeneralAmericanEnglish;
		break;
	case SDI_LANGUAGE_BRITISH_ENGLISH:
		Lang=eciBritishEnglish;
		break;
	case SDI_LANGUAGE_MANDARIN_CHINESE:
		Lang=eciMandarinChinese;
		break;
	case SDI_LANGUAGE_TAIWANESE_MANDARIN:
		Lang=eciTaiwaneseMandarin;
		break;
	default:
		Lang=eciGeneralAmericanEnglish;
		break;
	}
	eciSetParam(pSpeech->hECI,eciLanguageDialect,Lang);
	eciSetParam(pSpeech->hECI,eciDictionary,dwECIStyle & 0x00000010);
	eciSetParam(pSpeech->hECI,eciInputType,dwECIStyle &0x00000020);
	eciSetParam(pSpeech->hECI,eciNumberMode,dwECIStyle & 0x00000040);
	// Map the sample-rate flag bits to ECI's 0/1/2 rate selector.
	int nSampleRate;
	switch(dwECIStyle & 0x00000180)
	{
	case SDI_SAMPLERATE_11024:
		nSampleRate=1;
		break;
	case SDI_SAMPLERATE_22048:
		nSampleRate=2;
		break;
	default:
		nSampleRate=0;
		break;
	}
	eciSetParam(pSpeech->hECI,eciSampleRate,nSampleRate);
	return TRUE;
}

/*****************************************************************
				SDI Object: Earcon Object
							(HEARCON)
*****************************************************************/
// Earcon Helper 

// Encoding MIDI messages
// Pack a MIDI channel-voice message into a DWORD:
// byte 0 = status|channel, byte 1 = first data byte, byte 2 = second data byte.
DWORD MakeVoiceMsg(BYTE byStatus, BYTE byChannel, BYTE byData1, BYTE byData2)
{
	return (DWORD)(byStatus | byChannel)
		| ((DWORD)byData1 << 8)
		| ((DWORD)byData2 << 16);
}

// Decode one MIDI event at *ppData, advancing the cursor past it.
// Returns the event packed into a DWORD (status in byte 0, data bytes in
// bytes 1-2), END_OF_TRACK for the end-of-track meta event, or 0 for any
// other Non-MIDI meta event.  Running status is NOT supported here (the
// default branch leaves the cursor unmoved); callers such as PlayNonstop
// must feed fully-specified events.
DWORD ReadMsg(BYTE** ppData)
{
	DWORD dwMsg;
	BYTE* p=*ppData;
	dwMsg = *p;
	switch(dwMsg & 0xF0)
	{
	case NOTE_OFF:	
	case NOTE_ON:
	case AFTER_TOUCH:
	case CONTROL_CHANGE:
	case PITCH_WHEEL:
		// Three-byte channel-voice messages: status + two data bytes.
		dwMsg |= (*(p+1))<<8;
		dwMsg |= (*(p+2))<<16;
		*ppData += 3;
		break;
	case PROGRAM_CHANGE:
		dwMsg |= (*(p+1))<<8;
		*ppData += 2;
		break;
	case CHANNEL_PRESSURE:
		dwMsg |= (*(p+1))<<8;
		*ppData += 2;
		break;
	case 0xF0:
		// System messages: dispatch on the full status byte.
		switch(dwMsg)
		{
		case SYSTEM_EXCLUSIVE:	// SysEx : 0xF0 ... 0xF7
			// BUGFIX: "*ppData++" incremented the BYTE** itself (precedence:
			// *(ppData++)) instead of the cursor, corrupting the caller's
			// pointer argument.  Advance (*ppData) past the 0xF7 terminator.
			while(**ppData != 0xF7)
				(*ppData)++;
			(*ppData)++;
			break;
		case MTC_QUARTER_FRAME:
			*ppData += 2;
			break;
		case SONG_POSITION:
			*ppData += 3;
			break;
		case SONG_SELECT:
			*ppData += 2;
			break;
		case TUNE_REQUEST:
			*ppData += 1;
			break;
		case MIDI_CLOCK:
		case MIDI_START:
		case MIDI_CONTINUE:
		case MIDI_STOP:
		case ACTIVE_SENSE:
		// RESET is replaced by SYSTEM_EXCLUSIVE
			*ppData += 1;
			break;
		case NONMIDI:	// 0xFF
			// Meta event: only END_OF_TRACK is surfaced; everything else
			// reports 0 so PlayNonstop stops on it.
			dwMsg=0;
			{
				switch(*(p+1))	// type
				{
				case END_OF_TRACK:
					dwMsg=END_OF_TRACK;
					break;
				default:
					break;
				}
			}
			// Skip over the Non-MIDI meta event: 0xFF, type, length byte,
			// then 'length' payload bytes.
			// NOTE(review): assumes the meta length fits one byte (not a
			// variable-length quantity) -- confirm against the inputs fed here.
			*ppData += *(*ppData+2) +3;
			break;
		default:
			break;
		}
		break;
	default:		//Running Status not supported in PlayNonStop
		break;
	}
	return dwMsg;
}

// Read a MIDI variable-length quantity at *ppData (7 data bits per byte,
// bit 7 set on every byte except the last), advancing the cursor past it.
DWORD ReadVarLen(BYTE** ppData)
{
	DWORD dw=0;
	while(**ppData & 0x80)
	{
		// BUGFIX: the original "(dw<<7) + **ppData & 0x7f" applied the 0x7f
		// mask to the whole sum ('+' binds tighter than '&'), wiping out the
		// previously accumulated high bits on every iteration.  Mask only
		// the incoming byte.
		dw=(dw<<7) + (**ppData & 0x7f);
		(*ppData)++;
	}
	// Final byte: continuation bit clear, all 8 bits are < 0x80 anyway.
	dw=(dw<<7) + **ppData;
	(*ppData)++;
	return dw;
}

// Write data_64 at *ppData as a MIDI variable-length quantity, most
// significant 7-bit group first, advancing the cursor.  Every byte except
// the last carries the continuation bit (0x80).
BOOL  WriteVarLen(BYTE** ppData, LONGLONG data_64)
{
	if(ppData==NULL)
		return FALSE;
	// Emit continuation bytes for every 7-bit group above the lowest that
	// still has significant bits left.
	for(int shift=56; shift>0; shift-=7)
	{
		if(data_64 >> shift)
			*((*ppData)++)=(BYTE)(((data_64 >> shift) & 0x7f) | 0x80);
	}
	// Final byte: bits 0-6 with the continuation bit clear.
	*((*ppData)++)=(BYTE)(data_64 & 0x7f);
	return TRUE;
}

// Read a cb-byte big-endian integer at *ppData, advancing the cursor.
DWORD ReadFixLen(BYTE** ppData, BYTE cb)
{
	DWORD dwValue=0;
	BYTE* pCur=*ppData;
	for(BYTE n=0; n<cb; n++)
		dwValue=(dwValue<<8) | *pCur++;
	*ppData=pCur;
	return dwValue;
}

// Write the low cb bytes of data_64 at *ppData in big-endian order,
// advancing the cursor.
BOOL WriteFixLen(BYTE** ppData, DWORD data_64, BYTE cb)
{
	if(ppData==NULL)
		return FALSE;
	BYTE* pOut=*ppData;
	for(int shift=(cb-1)*8; shift>=0; shift-=8)
		*pOut++=(BYTE)(data_64 >> shift);
	*ppData=pOut;
	return TRUE;
}

// Clear the playing flag and release every track of an earcon, returning it
// to the empty (no tracks loaded) state.  Safe to call repeatedly.
BOOL ResetEarcon(SDIEARCONLIST* pEarcon)
{
	if(pEarcon == NULL)
		return FALSE;
	pEarcon->data.dwStatus &= ~STATUS_EARCON_PLAYING;

	for(int i=0;i<pEarcon->nTracks;i++)
	{
		pEarcon->track[i].byPrevStatus=0;
		pEarcon->track[i].pbCursor=0;
		pEarcon->track[i].dwInterval=0;
		pEarcon->track[i].dwLength=0;
		delete[] pEarcon->track[i].pbData;
		// BUGFIX: pbData was left dangling after delete[]; null it so a
		// later reset/inspection cannot double-delete or dereference it.
		pEarcon->track[i].pbData=NULL;
	}
	pEarcon->nTracks=0;
	return TRUE;
}

// Rewind every loaded track of an earcon back to its first event (without
// freeing the track data) and clear the playing flag.  Each track's cursor
// is reset to the start of its data and the first delta-time is preloaded
// into dwInterval.
BOOL RewindEarcon(SDIEARCONLIST* pEarcon)
{
	if(pEarcon == NULL)
		return FALSE;
	pEarcon->data.dwStatus &= ~STATUS_EARCON_PLAYING;

	for(int nTrack=0; nTrack<pEarcon->nTracks; nTrack++)
	{
		pEarcon->track[nTrack].byPrevStatus=0;
		pEarcon->track[nTrack].pbCursor=pEarcon->track[nTrack].pbData;
		pEarcon->track[nTrack].dwInterval=ReadVarLen(&pEarcon->track[nTrack].pbCursor);
	}
	return TRUE;
}

// Worker thread driving one earcon object.  It has two duties:
//  - drain the object's message queue (play/stop/quit requests), and
//  - act as a software MIDI sequencer: each WAIT_TIMEOUT wakeup sends the
//    due event of the nearest-deadline track, then recomputes the next
//    timeout from the remaining per-track delta intervals.
// pvContext carries the HEARCON handle.  Returns 0 after MSG_EARCON_QUIT.
DWORD WINAPI SDIEarconThread(PVOID pvContext)
{
	HEARCON hEarcon=(HEARCON)pvContext;
	SDISTATLIST* pHandle=(SDISTATLIST*)MAKEPOINTER(g_handleHead, GETHANDLESTATOFFSET(hEarcon));

	SDIEARCONLIST* pEarcon=(SDIEARCONLIST*)MAKEPOINTER(g_earconHead, GETHANDLEOFFSET(hEarcon));
	
	// handle[0] = message-queue semaphore; handle[1] = DSound position event.
	// Only handle[0] is actually waited on (count 1 below) -- the DSound
	// transfer branch is abandoned, see the WAIT_OBJECT_0+1 comment.
	HANDLE handle[2]={pEarcon->data.semaphore, pEarcon->hDSBSync};
	DWORD dw;
	INT i;
	HRESULT hr;
	BOOL bQuit=FALSE;
	DWORD dwTimeOut=INFINITE;	// ms until the next due MIDI event; INFINITE while stopped
	REFERENCE_TIME rtPrev;
	REFERENCE_TIME rt;
	pEarcon->pClock->GetTime(&rtPrev);
	BOOL bStoped=FALSE;
	BYTE *pDest, *pSrc;
	DWORD sizeDest,sizeSrc;
	
	while(!bQuit)
	{
		dw=WaitForMultipleObjects(1,handle,FALSE,dwTimeOut);	// Do NOT wait for buffer
																// notification.
		// All per-earcon state below is guarded by the object's lock.
		EnterCriticalSection(&pEarcon->data.cs);
		switch(dw)
		{
		case WAIT_OBJECT_0:
			// A message was posted: pop the head of the queue and act on it.
			{
				LISTHEADER* plist=pEarcon->data.msgHead.list.pNext;
				MSGLIST* pMsg=listGetStruct(plist, MSGLIST, list);
				listRemoveItem(plist);
				switch(pMsg->dwMessage)
				{
				case MSG_EARCON_QUIT:	// Quit loop
					bQuit=TRUE;
					break;
				case MSG_EARCON_PLAY:
					// Find the track whose first event is due soonest and
					// arm the wait timeout for it.
					{
						int nMin=0;
						DWORD dwMin=pEarcon->track[0].dwInterval;
						for(int i=0;i<pEarcon->nTracks;i++)
						{
							if(pEarcon->track[i].dwInterval < dwMin)
							{
								nMin=i;
								dwMin=pEarcon->track[i].dwInterval;
							}
						}
						pEarcon->data.dwStatus |= STATUS_EARCON_PLAYING;
						pEarcon->nCurTrack=nMin;
						// NOTE(review): denominator is computed but unused here.
						int denominator=(pEarcon->timeSignature>>16) & 0xff;
						// ticks * (microsec per quarter note) / (ticks per QN) / 1000 -> ms
						dwTimeOut=dwMin*pEarcon->dwTempo/pEarcon->nTicksPerQN/1000;
						//pEarcon->pSoundBuffer->Play(0,0,DSBPLAY_LOOPING);
					}
					break;
				case MSG_EARCON_STOP:
					{
						//pEarcon->pSoundBuffer->Stop();
						dwTimeOut=INFINITE;	// stop sequencing; wait only for messages
					}
					break;
				default:
					break;
				} // End of switch(pMsg->dwMessage)
				delete pMsg;	// message nodes are heap-allocated by the poster
			}
			break;
		case WAIT_OBJECT_0+1:	
			// Transfer data from DSound Buffer to A3D Source
			// Note: However due to problem that DSound present, this branch is 
			// actually abandoned. See readme.txt for detailed information.
			{
				pEarcon->pSoundBuffer->Lock(0,
					SDI_EARCON_SECTIONSIZE*SDI_EARCON_NUM_DSBSECTION, (PVOID*)&pSrc, &sizeSrc,NULL,NULL,DSBLOCK_ENTIREBUFFER);
				pEarcon->data.pSource->Lock(0,
					SDI_EARCON_SECTIONSIZE*SDI_EARCON_NUM_SRCSECTION, (PVOID*)&pDest, &sizeDest,NULL,NULL,A3D_ENTIREBUFFER);
				memcpy(pDest+pEarcon->nDsbSec*SDI_EARCON_SECTIONSIZE,
					pSrc+pEarcon->nSrcSec*SDI_EARCON_SECTIONSIZE,SDI_EARCON_SECTIONSIZE);
				pEarcon->data.pSource->Unlock(pDest,sizeDest,NULL,0);
				pEarcon->pSoundBuffer->Unlock(pSrc,sizeSrc,NULL,0);

				// Advance both ring-buffer section indices, wrapping around.
				pEarcon->nDsbSec++;
				if(pEarcon->nDsbSec == SDI_EARCON_NUM_DSBSECTION)
					pEarcon->nDsbSec=0;
				pEarcon->nSrcSec++;
				if(pEarcon->nSrcSec == SDI_EARCON_NUM_SRCSECTION)
					pEarcon->nSrcSec=0;
				
				if(!(pEarcon->data.dwStatus & STATUS_EARCON_PLAYING) 
					&& (pEarcon->nSrcSec==2) )
				{
					// Start Playing when write cursor is at the middle of scource buffer
					pEarcon->data.dwStatus |= STATUS_EARCON_PLAYING;
					hr=pEarcon->data.pSource->Play(A3D_LOOPED);
					_ASSERTE(SUCCEEDED(hr));
					hr=g_pA3d->Flush();
					_ASSERTE(SUCCEEDED(hr));
				}

				// This wakeup consumed part of the pending timeout; subtract
				// the elapsed time so the next MIDI event still fires on time.
				pEarcon->pClock->GetTime(&rt);
				if(dwTimeOut != INFINITE)
				{	
					dwTimeOut -= (rt-rtPrev)/10000; // 100 nanosec to millisec
					if(dwTimeOut & 0x80000000)	// unsigned underflow => already overdue
						dwTimeOut=0;	
				}
				rtPrev=rt;
			}
			break;
		case WAIT_TIMEOUT:	
			{
				TraceError("Time out.");
				// Time out for a MIDI message to be sent
				// Decode the event at the current track's cursor.  The
				// decoding mirrors ReadMsg but additionally supports MIDI
				// running status via byPrevStatus.
				DWORD dwMsg;
				BYTE* p=pEarcon->track[pEarcon->nCurTrack].pbCursor;
				dwMsg = *p;
				if(dwMsg & 0x80)	// Not a Running Status
					pEarcon->track[pEarcon->nCurTrack].byPrevStatus=*p;
				switch(dwMsg & 0xF0)
				{
				case NOTE_OFF:	
				case NOTE_ON:
				case AFTER_TOUCH:
				case CONTROL_CHANGE:
				case PITCH_WHEEL:
					// Three-byte channel-voice messages.
					dwMsg |= (*(p+1))<<8;
					dwMsg |= (*(p+2))<<16;
					pEarcon->track[pEarcon->nCurTrack].pbCursor += 3;
					break;
				case PROGRAM_CHANGE:
					// Download the instrument before forwarding the event so
					// the timbre is ready when the change is committed.
					SetChannelInstrument(hEarcon, dwMsg & 0x0F/*pEarcon->nCurTrack*/,
						*(p+1), NULL);
					dwMsg |= (*(p+1))<<8;
					pEarcon->track[pEarcon->nCurTrack].pbCursor += 2;
					break;
				case CHANNEL_PRESSURE:
					dwMsg |= (*(p+1))<<8;
					pEarcon->track[pEarcon->nCurTrack].pbCursor += 2;
					break;
				case 0xF0:
					// System/meta events are skipped (not forwarded to the port).
					switch(dwMsg)
					{
					case SYSTEM_EXCLUSIVE:	// SysEx : 0xF0 ... 0xF7
						while(*pEarcon->track[pEarcon->nCurTrack].pbCursor != 0xF7)
							pEarcon->track[pEarcon->nCurTrack].pbCursor++;
						pEarcon->track[pEarcon->nCurTrack].pbCursor++;
						break;
					case MTC_QUARTER_FRAME:
						pEarcon->track[pEarcon->nCurTrack].pbCursor += 2;
						break;
					case SONG_POSITION:
						pEarcon->track[pEarcon->nCurTrack].pbCursor += 3;
						break;
					case SONG_SELECT:
						pEarcon->track[pEarcon->nCurTrack].pbCursor += 2;
						break;
					case TUNE_REQUEST:
						pEarcon->track[pEarcon->nCurTrack].pbCursor += 1;
						break;
					case MIDI_CLOCK:
					case MIDI_START:
					case MIDI_CONTINUE:
					case MIDI_STOP:
					case ACTIVE_SENSE:
					// RESET is replaced by SYSTEM_EXCLUSIVE
						pEarcon->track[pEarcon->nCurTrack].pbCursor += 1;
						break;
					case NONMIDI:	// 0xFF
						{
							switch(*(p+1))	// type
							{
							case END_OF_TRACK:
								{
									// Mark this track finished; if every track
									// is finished, loop the whole earcon.
									pEarcon->track[pEarcon->nCurTrack].dwInterval=0xffffffff;
									for(int i=0;i<pEarcon->nTracks;i++)
									{
										if(pEarcon->track[i].dwInterval != 0xffffffff)
											goto nextnote;
									}
									RewindEarcon(pEarcon);
								}
								break;
							default:
								break;
							}
						}
						// Override the Non-MIDI Meta-Event
						pEarcon->track[pEarcon->nCurTrack].pbCursor +=
							*(pEarcon->track[pEarcon->nCurTrack].pbCursor+2) +3;
						break;
					default:
						break;
					}
					goto postsent;	// system/meta events: skip PackStructured
					break;
				default:		//Running Status
					// Reuse the previous status byte; p currently points at
					// the first data byte.
					dwMsg = (dwMsg<<8) + pEarcon->track[pEarcon->nCurTrack].byPrevStatus;
					dwMsg |= (*(p+1))<<16;
					pEarcon->track[pEarcon->nCurTrack].pbCursor += 2;
					break;
				} // End of switch(dwMsg & 0xF0)
				
				// Send out message
				REFERENCE_TIME	rt;
				hr=pEarcon->pClock->GetTime(&rt);
				_ASSERTE(SUCCEEDED(hr));
				hr=pEarcon->pMusicBuffer->PackStructured(rt, pEarcon->nChannelGroup, dwMsg);
				if(FAILED(hr))
				{
					// If there is any syntex error, quit playing
					RewindEarcon(pEarcon);
					dwTimeOut=INFINITE;
					break;
				}
				hr=pEarcon->pPort->PlayBuffer(pEarcon->pMusicBuffer);
				_ASSERTE(SUCCEEDED(hr));
				hr=pEarcon->pMusicBuffer->Flush();
				_ASSERTE(SUCCEEDED(hr));

postsent:
				// Substract the dwInterval of other tracks, reload the dwInterval
				// for current track, then wait for the nearest MIDI event to happen
				for(i=0;i<pEarcon->nTracks; i++)
				{
					if(i == pEarcon->nCurTrack 
						|| pEarcon->track[i].dwInterval==0xffffffff)
						continue;
					pEarcon->track[i].dwInterval -=
						pEarcon->track[pEarcon->nCurTrack].dwInterval;
				}
nextnote:
				if(pEarcon->data.dwStatus & STATUS_EARCON_PLAYING)
				{
					// Reload the current track's next delta (unless finished),
					// then pick whichever track is due soonest.
					if(pEarcon->track[pEarcon->nCurTrack].dwInterval != 0xffffffff)
					{
						pEarcon->track[pEarcon->nCurTrack].dwInterval=
							ReadVarLen(&pEarcon->track[pEarcon->nCurTrack].pbCursor);
					}

					int nMin=0;
					DWORD dwMin=pEarcon->track[0].dwInterval;
					for(i=0;i<pEarcon->nTracks;i++)
					{
						if(pEarcon->track[i].dwInterval < dwMin)
						{
							nMin=i;
							dwMin=pEarcon->track[i].dwInterval;
						}
					}
					pEarcon->nCurTrack=nMin;
					
					// NOTE(review): denominator is computed but unused here.
					int denominator=(pEarcon->timeSignature>>16) & 0xff;
					// ticks * (microsec per quarter note) / (ticks per QN) / 1000 -> ms
					dwTimeOut=dwMin*pEarcon->dwTempo/pEarcon->nTicksPerQN/1000;
				}
				else
					dwTimeOut=INFINITE;
				
				pEarcon->pClock->GetTime(&rt);
				rtPrev=rt;
			} // End of WAIT_TIMEOUT
			break;
		default:
			TraceError("Wait failed.");
			break;
		} // End of switch(dw)

		LeaveCriticalSection(&pEarcon->data.cs);
	} // End of while(!bQuit)
	return 0;
}

// Create an earcon (MIDI) object and start its sequencer worker thread.
//   dwTempo       - microseconds per quarter note (SMF tempo).
//   nTicksPerQN   - SMF division (ticks per quarter note).
//   timeSignature - packed SMF time signature (denominator exponent in
//                   bits 16-23, as read below).
//   keySignature  - packed SMF key signature (stored verbatim).
// Returns the new HEARCON handle, or NULL when a handle offset overflows.
// The handle packs: object type, offset of the stat record, offset of the
// earcon record (steps marked 1/2/3 below).
// NOTE(review): most failures only trip _ASSERTE in debug builds; release
// builds continue past failed COM calls -- confirm intended error policy.
HEARCON CreateEarcon(DWORD dwTempo, WORD nTicksPerQN, 
					 DWORD timeSignature, WORD keySignature)
{
	HEARCON hEarcon=MAKEHANDLETYPE(SDI_TYPE_EARCON);	// 1
	HRESULT hr;

	// Allocate the per-handle stat record on the shared handle heap.
	SDISTATLIST* pHandle=HEAPALLOC_STRUCT(g_hHandleHeap, SDISTATLIST);
	_ASSERTE(pHandle);
	if(g_handleHead != NULL)
	{
		hEarcon |= MAKEHANDLESTATOFFSET(GETOFFSET(pHandle, g_handleHead));	// 2
		if(GETOFFSET(pHandle, g_handleHead) > SDI_HANDLE_MAX_STATOFFSET)
			return NULL;
	}

	// Allocate the earcon record; the first allocation becomes the list head.
	SDIEARCONLIST* pEarcon=HEAPALLOC_STRUCT(g_hEarconHeap, SDIEARCONLIST);
	_ASSERTE(pEarcon);
	if(g_earconHead == NULL)
		g_earconHead=pEarcon;
	if(GETOFFSET(pEarcon, g_earconHead) > SDI_HANDLE_MAX_OFFSET)
		return NULL;
	// Complete the handle
	hEarcon |= MAKEHANDLEOFFSET(GETOFFSET(pEarcon, g_earconHead));		// 3
	InitCommonData(&pEarcon->data);

	// Set up A3D source for "Displaying"
	hr=g_pA3d->NewSource(A3DSOURCE_TYPEDEFAULT,&(pEarcon->data.pSource));
	_ASSERTE(SUCCEEDED(hr));

	// PCM format shared by the A3D source and the DSound buffer below.
	WAVEFORMATEX wfx; 
	memset(&wfx, 0, sizeof(WAVEFORMATEX)); 
	wfx.wFormatTag = WAVE_FORMAT_PCM; 
	wfx.nChannels = SDI_EARCON_AUDIOCHANNELS; 
	wfx.nSamplesPerSec = SDI_EARCON_SAMPLERATE; 
	wfx.nBlockAlign = SDI_EARCON_BITDEPTH/8; 
	wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign; 
	wfx.wBitsPerSample = SDI_EARCON_BITDEPTH;
	hr=pEarcon->data.pSource->SetAudioFormat(&wfx);
	_ASSERTE(SUCCEEDED(hr));
	// Source Buffer
	hr=pEarcon->data.pSource->AllocateAudioData(SDI_EARCON_SOURCEBUFFERSIZE);
	_ASSERTE(SUCCEEDED(hr));
	ResetSource(pEarcon->data.pSource);

	// DirectMusic Buffer
	DMUS_BUFFERDESC mdesc={sizeof(DMUS_BUFFERDESC)};
	mdesc.guidBufferFormat=GUID_NULL;
	mdesc.cbBuffer=SDI_EARCON_DMBUFFERSIZE;
	hr=g_pMusic->CreateMusicBuffer(&mdesc, &pEarcon->pMusicBuffer, NULL);
	_ASSERTE(SUCCEEDED(hr));

	// Direct Sound Buffer for sequencer output destination
	DSBUFFERDESC desc;
	ZeroMemory(&desc, sizeof(DSBUFFERDESC));
	desc.dwSize=sizeof(DSBUFFERDESC);
	desc.dwFlags=DSBCAPS_CTRLVOLUME|DSBCAPS_CTRL3D|DSBCAPS_CTRLPOSITIONNOTIFY
		|DSBCAPS_GLOBALFOCUS|DSBCAPS_GETCURRENTPOSITION2|DSBCAPS_LOCSOFTWARE;
	desc.dwBufferBytes=SDI_EARCON_DSBUFFERSIZE;
	desc.lpwfxFormat=&wfx;
	desc.guid3DAlgorithm=DS3DALG_HRTF_FULL;
	// Create via the legacy interface, then QI for the v8 interfaces and
	// drop the temporary.
	IDirectSoundBuffer*  pSBTemp=NULL;
	hr=g_pSound->CreateSoundBuffer(&desc, &pSBTemp, NULL);
	if(FAILED(hr))
		DXTRACE_ERR_MSGBOX("", hr);
	hr=pSBTemp->QueryInterface(IID_IDirectSoundBuffer8, (void**)&pEarcon->pSoundBuffer);
	_ASSERTE(SUCCEEDED(hr));
	hr=pSBTemp->QueryInterface(IID_IDirectSound3DBuffer8, (void**)&pEarcon->p3DBuffer);
	_ASSERTE(SUCCEEDED(hr));
	hr=pSBTemp->Release();
	_ASSERTE(SUCCEEDED(hr));
	pSBTemp=NULL;

	// Mute DS Buffer
	// hr=pEarcon->pSoundBuffer->SetVolume(-10000);

	// Notification Support
	IDirectSoundNotify8* pNotify=NULL;
	hr=pEarcon->pSoundBuffer->QueryInterface(IID_IDirectSoundNotify8, (void**)&pNotify);
	_ASSERTE(SUCCEEDED(hr));
	
	// Auto-reset event signalled at each buffer section boundary.
	pEarcon->hDSBSync=CreateEvent(NULL, FALSE, FALSE, NULL);

	DSBCAPS caps={sizeof(DSBCAPS)};// Flexibility for runtime-determined configuration
	pEarcon->pSoundBuffer->GetCaps(&caps);
	DSBPOSITIONNOTIFY pn[SDI_EARCON_NUM_DSBSECTION];
	for(int i=0;i<SDI_EARCON_NUM_DSBSECTION;i++)
	{
		pn[i].dwOffset=SDI_EARCON_DSBUFFERSIZE/SDI_EARCON_NUM_DSBSECTION*i;
		pn[i].hEventNotify=pEarcon->hDSBSync;
	}
	hr=pNotify->SetNotificationPositions(SDI_EARCON_NUM_DSBSECTION,pn);
	_ASSERTE(SUCCEEDED(hr));
	hr=pNotify->Release();
	
	// DirectMusic Port
	// Pick the first enumerated port that can render into DirectSound
	// (bounded scan of at most 11 ports).
	DMUS_PORTCAPS portCaps={sizeof(DMUS_PORTCAPS)};
	int nPortIndex=0;
	BOOL bSucceeded=FALSE;
	while(SUCCEEDED(g_pMusic->EnumPort(nPortIndex,&portCaps)))
	{
		if(portCaps.dwFlags & DMUS_PC_DIRECTSOUND)
		{
			bSucceeded=TRUE;
			break;
		}
		nPortIndex++;
		if(nPortIndex>10)
			break;
	}
	_ASSERTE(bSucceeded);	// More fall-back-on work required.
	DMUS_PORTPARAMS8  portParam={sizeof(DMUS_PORTPARAMS8)};
	portParam.dwValidParams=DMUS_PORTPARAMS_SAMPLERATE|DMUS_PORTPARAMS_CHANNELGROUPS
							|DMUS_PORTPARAMS_AUDIOCHANNELS;
	portParam.dwAudioChannels=SDI_EARCON_AUDIOCHANNELS;
	portParam.dwSampleRate=SDI_EARCON_SAMPLERATE;
	portParam.dwChannelGroups=1;
	hr=g_pMusic->CreatePort(portCaps.guidPort, &portParam, &pEarcon->pPort, NULL);
	if(FAILED(hr))
		DXTRACE_ERR_MSGBOX("", hr);
	hr=pEarcon->pPort->SetDirectSound(g_pSound, pEarcon->pSoundBuffer);
	_ASSERTE(SUCCEEDED(hr));
	hr=pEarcon->pPort->Activate(TRUE);
	_ASSERTE(SUCCEEDED(hr));

	// Latency clock used by the sequencer thread for event timestamps.
	hr=pEarcon->pPort->GetLatencyClock(&pEarcon->pClock);
	_ASSERTE(SUCCEEDED(hr));

	// Set up MIDI parameters
	// Channel Group
	// NOTE(review): this loop reuses 'i' from the for-loop above -- legacy
	// MSVC for-scope; would not compile under conformant scoping.
	for(i=0;i<SDI_EARCON_MAXEARCON;i++)
	{
		if(!(g_channelGroupBitmap & (1 << i)))	// Find an entry
		{
			pEarcon->nChannelGroup=i;
			g_channelGroupBitmap |= (1 << i);
			break;
		}
	}
	// Tempo
	pEarcon->dwTempo=dwTempo;
	// denominator here is the SMF power-of-two exponent; BPM scales the
	// quarter-note rate by 2^(denominator-2) -- presumably to express beats
	// in the signature's own note value; TODO confirm formula.
	int denominator=(timeSignature>>16) & 0xff;
	pEarcon->BPM=1.0e6/(FLOAT)dwTempo*60*pow(2,denominator-2);
	pEarcon->nTicksPerQN=nTicksPerQN;

	// Signature
	pEarcon->timeSignature=timeSignature;
	pEarcon->keySignature=keySignature;

	InitializeCriticalSection(&pEarcon->data.cs);

	// Add earcon to handle list
	pHandle->hSDIObject=hEarcon;
	pHandle->dwThreadCreate=GetCurrentThreadId();
	if(g_handleHead == NULL)
		g_handleHead=pHandle;
	else
		listAddTail(&g_handleHead->list, &pHandle->list);
	
	// Start responding to MIDI sequence request.
	QueueUserWorkItem(SDIEarconThread, (PVOID)hEarcon, WT_EXECUTELONGFUNCTION);
	return hEarcon;
}

// Download a General MIDI instrument onto one channel of an earcon and emit
// the bank-select/program-change messages that commit the timbre change.
//   hEarcon   - earcon handle.
//   nChannel  - MIDI channel index into pDownloadedInstruments[].
//   nPatch    - GM patch number, 0-127.
//   noteRange - optional note range to download; NULL selects the default
//               LOWNOTE..HIGHNOTE range.
// Returns FALSE on invalid arguments, TRUE otherwise.
BOOL	SetChannelInstrument(HEARCON hEarcon, WORD nChannel, int nPatch, 
							 NOTERANGE* noteRange)
{
	// BUGFIX: nChannel is unsigned, so the old "nChannel<0" test was always
	// false; and ">SDI_EARCON_NUM_CHANNEL" admitted
	// nChannel==SDI_EARCON_NUM_CHANNEL, indexing one slot past the end of
	// pDownloadedInstruments[] (assuming the array has
	// SDI_EARCON_NUM_CHANNEL entries -- confirm against the struct).
	if(hEarcon==NULL || nChannel>=SDI_EARCON_NUM_CHANNEL || nPatch<0 || nPatch>127)
		return FALSE;
	SDISTATLIST* pHandle=(SDISTATLIST*)MAKEPOINTER(g_handleHead, GETHANDLESTATOFFSET(hEarcon));
	
	SDIEARCONLIST* pEarcon=(SDIEARCONLIST*)MAKEPOINTER(g_earconHead, GETHANDLEOFFSET(hEarcon));
	// Release any instrument previously downloaded to this channel.
	if(pEarcon->pDownloadedInstruments[nChannel])
	{
		pEarcon->pPort->UnloadInstrument(pEarcon->pDownloadedInstruments[nChannel]);
		pEarcon->pDownloadedInstruments[nChannel]->Release();
		pEarcon->pDownloadedInstruments[nChannel]=NULL;
	}
	
	IDirectMusicInstrument8* pInstrument=NULL;
	g_pGMCollection->GetInstrument(nPatch, &pInstrument);
	// Download only the requested note range (defaults when NULL).
	DMUS_NOTERANGE nr;
	if(noteRange == NULL)
	{
		nr.dwLowNote=LOWNOTE;
		nr.dwHighNote=HIGHNOTE;
	}
	else
	{
		memcpy(&nr, noteRange, sizeof(DMUS_NOTERANGE));
	}
	pEarcon->pPort->DownloadInstrument(pInstrument, 
		&pEarcon->pDownloadedInstruments[nChannel], &nr, 1);
	pInstrument->Release();
	// Send MIDI message to commit the timbre change
	REFERENCE_TIME rt;
	DWORD dwMsg;
	HRESULT hr;
	hr=pEarcon->pClock->GetTime(&rt);
	_ASSERTE(SUCCEEDED(hr));
	// NOTE(review): with nPatch limited to 0-127, (nPatch>>16)&0x7fff is
	// always 0, so bank 0 is always selected -- confirm whether multi-bank
	// patches were ever intended here.
	dwMsg=MakeVoiceMsg(CONTROL_CHANGE, nChannel, BANK_SELECT, (nPatch>>16) & 0x7fff);
	hr=pEarcon->pMusicBuffer->PackStructured(rt, pEarcon->nChannelGroup, dwMsg);
	_ASSERTE(SUCCEEDED(hr));
	dwMsg=MakeVoiceMsg(PROGRAM_CHANGE, nChannel, nPatch & 0xffff, 0);
	hr=pEarcon->pMusicBuffer->PackStructured(rt, pEarcon->nChannelGroup, dwMsg);
	_ASSERTE(SUCCEEDED(hr));
	hr=pEarcon->pPort->PlayBuffer(pEarcon->pMusicBuffer);
	_ASSERTE(SUCCEEDED(hr));
	hr=pEarcon->pMusicBuffer->Flush();
	_ASSERTE(SUCCEEDED(hr));

	return TRUE;
}

// Pack an entire single-stream MIDI sequence (delta-time + event pairs,
// terminated by an event ReadMsg decodes as 0) into the DirectMusic buffer
// at absolute latency-clock times, then play it in one shot.
// Returns FALSE on bad arguments or when PackStructured rejects an event.
BOOL	PlayNonstop(HEARCON hEarcon, BYTE* pMusicData)
{
	if(hEarcon==NULL || pMusicData==NULL)
		return FALSE;
	SDISTATLIST* pHandle=(SDISTATLIST*)MAKEPOINTER(g_handleHead, GETHANDLESTATOFFSET(hEarcon));

	SDIEARCONLIST* pEarcon=(SDIEARCONLIST*)MAKEPOINTER(g_earconHead, GETHANDLEOFFSET(hEarcon));
	
	// Anchor event times at "now" on the port's latency clock.
	REFERENCE_TIME	rtWhen;
	HRESULT			hr=pEarcon->pClock->GetTime(&rtWhen);
	_ASSERTE(SUCCEEDED(hr));
	WORD wTicksPerQN=pEarcon->nTicksPerQN;
	DWORD dwTempoUS=pEarcon->dwTempo;	// microseconds per quarter note

	Stop(hEarcon);
	
	for(;;)
	{
		DWORD dwDelta=ReadVarLen(&pMusicData);
		DWORD dwMsg=ReadMsg(&pMusicData);
		if(dwMsg==0)
			break;	// end of stream (non-EOT meta event decodes as 0)
		// delta ticks -> 100ns units at the current tempo.
		rtWhen+=(REFERENCE_TIME)dwDelta*dwTempoUS*MICROSEC_TO_100NANOSEC/wTicksPerQN;
		hr=pEarcon->pMusicBuffer->PackStructured(rtWhen, pEarcon->nChannelGroup, dwMsg);
		if(FAILED(hr))
		{
			// Malformed event: flush what was queued and abort playback.
			hr=pEarcon->pMusicBuffer->Flush();
			_ASSERTE(SUCCEEDED(hr));
			Stop(hEarcon);
			return FALSE;
		}
	}
	hr=pEarcon->pPort->PlayBuffer(pEarcon->pMusicBuffer);
	_ASSERTE(SUCCEEDED(hr));
	hr=pEarcon->pMusicBuffer->Flush();
	_ASSERTE(SUCCEEDED(hr));
	return TRUE;
}


// Load nTracks length-prefixed MIDI tracks into an earcon (each entry of
// ppTracks starts with a 4-byte big-endian length, which the length value
// itself excludes) and post MSG_EARCON_PLAY to the sequencer thread.
// Returns FALSE on bad arguments.
BOOL	PlaySegment(HEARCON hEarcon, BYTE** ppTracks, BYTE nTracks)
{
	if(hEarcon==NULL || ppTracks==NULL || nTracks==0)
		return FALSE;
	
	Stop(hEarcon);
	SDISTATLIST* pHandle=(SDISTATLIST*)MAKEPOINTER(g_handleHead, GETHANDLESTATOFFSET(hEarcon));

	SDIEARCONLIST* pEarcon=(SDIEARCONLIST*)MAKEPOINTER(g_earconHead, GETHANDLEOFFSET(hEarcon));
	
	// Discard whatever tracks were loaded before.
	ResetEarcon(pEarcon);

	pEarcon->nTracks=nTracks;
	pEarcon->nCurTrack=0;
	for(int nTrack=0; nTrack<nTracks; nTrack++)
	{
		BYTE* pbSrc=ppTracks[nTrack];
		// Consume the 4-byte length prefix, then copy the track body.
		DWORD cbTrack=ReadFixLen(&pbSrc, 4);
		pEarcon->track[nTrack].dwLength=cbTrack;
		pEarcon->track[nTrack].pbData=new BYTE[cbTrack];
		memcpy(pEarcon->track[nTrack].pbData, pbSrc, cbTrack);
		// Position the cursor past the first delta-time, preloading it.
		pEarcon->track[nTrack].pbCursor=pEarcon->track[nTrack].pbData;
		pEarcon->track[nTrack].dwInterval=ReadVarLen(&pEarcon->track[nTrack].pbCursor);
	}

	PostSDIMessage(&pEarcon->data, MSG_EARCON_PLAY, 0,NULL);
	return TRUE;
}

// Translate an SDI music-notation string into a Standard MIDI File image.
//   hEarcon - earcon whose tempo/time-signature settings are embedded.
//   psMusic - notation string; tracks separated by ';', terminated by '\0'.
//   pbData  - output buffer, or NULL to only measure.  Call once with NULL
//             to size the buffer, then again with the buffer to fill it.
// Returns the total byte count of the encoded file, or -1 on bad arguments.
// Notation Format:
//   Note:    [b|#,...]{note(C4,F6,etc.)}{velocity}[.|..][trailing space]
//   Command: @command_name, parameter[,parameter...]
int	ParseNotation(HEARCON hEarcon, PTSTR psMusic, BYTE *pbData)
{
	if(hEarcon==NULL || psMusic==NULL)
		return -1;
	SDISTATLIST* pHandle=(SDISTATLIST*)MAKEPOINTER(g_handleHead, GETHANDLESTATOFFSET(hEarcon));

	SDIEARCONLIST* pEarcon=(SDIEARCONLIST*)MAKEPOINTER(g_earconHead, GETHANDLEOFFSET(hEarcon));
	
	// BUGFIX: keep the base of the duplicated string; the parse cursor p
	// walks away from it and the original code leaked the duplicate.
	PTSTR pParse=_tcsdup(psMusic);
	PTSTR p=pParse;
	TCHAR ch;
	BYTE note=0;
	BYTE prevNote=0;
	DWORD vel=0;
	DWORD prevVel=0;
	// NOTE(review): fixed 10000-byte scratch track with no overflow check --
	// a long enough notation overruns it; confirm an upper bound or grow it.
	BYTE *pMusic=new BYTE[10000];
	memset(pMusic,0,10000);
	BYTE *pData=pMusic;
	BYTE channel=0;

	int dwTotalLen=0;

	// Add header length
	// Header chunk
	dwTotalLen+=4+4+6;
	if(pbData)
	{
		WriteFixLen(&pbData, 0x4D546864, 4);	// "MThd"
		WriteFixLen(&pbData, 0x00000006, 4);	// header length
		WriteFixLen(&pbData, 0x01, 2);			// format 1
		WriteFixLen(&pbData, 16, 2);		// ntrks, must be 16
		WriteFixLen(&pbData, pEarcon->nTicksPerQN, 2);	// division
	}
	
	// First chunk, define the attributes of the music
	dwTotalLen+=27;
	if(pbData)
	{
		WriteFixLen(&pbData, 0x4D54726B, 4);	// "MTrk" (fixed comment: this is a track chunk, not "MThd")
		WriteFixLen(&pbData, 0x00000013, 4);	// track length
		// Time signature
		WriteFixLen(&pbData, 0x00FF5804, 4);
		WriteFixLen(&pbData, pEarcon->timeSignature, 4);
		// Tempo
		WriteFixLen(&pbData, 0x00FF5103, 4);
		WriteFixLen(&pbData, pEarcon->dwTempo, 3);
		// End of track
		WriteFixLen(&pbData, 0x00FF2F00, 4);	

	}
	while(1)
	{
		ch = *p;
		if( ch=='\0' || ch==';')
		{
			// End of a track: stop the previous note, terminate the track,
			// and account for (or emit) the finished track chunk.
			WriteVarLen(&pData, prevVel);
			WriteFixLen(&pData, NOTE_OFF | channel, 1);
			WriteFixLen(&pData, prevNote, 1);
			WriteFixLen(&pData, 0x0, 1);
			
			// Write Trailing "FF 2F 00" to indicate the end of track
			WriteFixLen(&pData, 0x00FF2F00, 4);

			DWORD dwCb=(DWORD)(pData-pMusic);
			dwTotalLen+=dwCb+4+4;

			if(pbData)
			{
				// Write track data
				WriteFixLen(&pbData, 0x4D54726B, 4);	// "MTrk"
				WriteFixLen(&pbData, dwCb, 4);			// track length
				memcpy(pbData, pMusic, dwCb);			// Music data
				pbData+=dwCb;
			}

			// Reset per-track state and move to the next channel.
			note=0;
			prevNote=0;
			vel=0;
			prevVel=0;
			memset(pMusic,0,10000);
			pData=pMusic;
			channel++;

			if(ch=='\0')
				break;	// End of music notation
			
			// Continue parsing
			p++;	
			// omit optional space
			ch=*p;
			while(ch==' ')
			{
				p++;
				ch=*p;
			}
		}
		
		//////////////////
		// Command
		if(ch == '@')
		{
			p++;
			BYTE bMsg;
			char t[10]="";
			char* pEnd;
			BYTE data1;
			
			WriteVarLen(&pData, prevVel);		// Velocity
			
			switch(*p)
			{
			case 'I':	// @Ixx : program change, xx = hex patch number
				bMsg=PROGRAM_CHANGE | channel;
				p++;

				// Get the program number
				t[0]=*p;
				p++;
				t[1]=*p;
				p++;
				t[2]='\0';
				pEnd=t+2;
				data1=(BYTE)strtol(t, &pEnd, 16);

				// optional space
				ch=*p;
				while(ch==' ')
				{
					p++;
					ch=*p;
				}
				
				WriteFixLen(&pData, bMsg, 1);
				WriteFixLen(&pData, data1, 1);
				break;
			default:
				break;
			}

			prevVel=0;
			continue;
		}



		//////////////////
		// MIDI note
		// [b|#]  accidentals adjust the semitone offset
		note=0;
		while(ch=='b' || ch=='#')
		{
			if(ch == 'b')
				note--;
			if(ch == '#')
				note++;
			p++;
			ch=*p;
		}
		// {note}
		switch(*p)
		{
		case 'C':
			note+=0;
			break;
		case 'D':
			note+=2;
			break;
		case 'E':
			note+=4;
			break;
		case 'F':
			note+=5;
			break;
		case 'G':
			note+=7;
			break;
		case 'A':
			note+=9;
			break;
		case 'B':
			note+=11;
			break;
		case 'N':	// Nx : raw note number taken from the next character
			{
				p++;
				note=*p;
				// NOTE(review): this path leaves p on the note byte, so the
				// velocity digit below re-reads that same character --
				// confirm the intended 'N' notation layout.
				goto END_OF_NOTE;
			}
			break;
		default:
			note=0;
			break;
		}
		p++;
		note+=24+(*p-'1')*12;	// note group. Note range : 24-119(C1-B8)
		p++;
END_OF_NOTE:

		//{velocity} -- actually the duration in ticks: whole note / 2^digit
		vel=(DWORD)(pEarcon->nTicksPerQN*4/pow(2, (*p - '0')));	// !!
		p++;

		// Dotted note: each dot extends the duration by half the previous extension
		ch=*p;
		int cent=1;
		while(ch=='.')
		{
			vel += vel/pow(2, cent);	
			p++;
			cent++;
			ch=*p;
		}

		// optional space
		ch=*p;
		while(ch==' ')
		{
			p++;
			ch=*p;
		}

		// Write music data
		WriteVarLen(&pData, prevVel);		// Velocity
			
		if(prevNote)
		{
			// Stop prev note
			WriteFixLen(&pData, NOTE_OFF | channel, 1);
			WriteFixLen(&pData, prevNote, 1);
			WriteFixLen(&pData, 0x0, 1);
			// Play another note	
			WriteFixLen(&pData, 0x0, 1);		
		}
		WriteFixLen(&pData, NOTE_ON | channel, 1);
		WriteFixLen(&pData, note, 1);
		WriteFixLen(&pData, 0x7f, 1);
		prevVel=vel;
		prevNote=note;
	}
	
	delete[] pMusic;
	free(pParse);	// BUGFIX: release the _tcsdup'd working copy (was leaked)
	return dwTotalLen;
}

// Parse an SDI music-notation string into per-channel track buffers and hand
// them to PlaySegment for sequenced playback.  Tracks are separated by ';'
// and the string is terminated by '\0'; at most 16 tracks are supported.
// Returns FALSE on bad arguments, TRUE otherwise.
// Notation Format:
//   Note:    [b|#,...]{note(C4,F6,etc.)}{velocity}[.|..][trailing space]
//   Command: @command_name, parameter[,parameter...]
BOOL	PlayNotation(HEARCON  hEarcon, PTSTR psMusic)
{
	if(hEarcon==NULL || psMusic==NULL)
		return FALSE;
	//Stop(hEarcon);
	SDISTATLIST* pHandle=(SDISTATLIST*)MAKEPOINTER(g_handleHead, GETHANDLESTATOFFSET(hEarcon));

	SDIEARCONLIST* pEarcon=(SDIEARCONLIST*)MAKEPOINTER(g_earconHead, GETHANDLEOFFSET(hEarcon));
	
	ResetEarcon(pEarcon);

	// BUGFIX: keep the base of the duplicated string; the parse cursor p
	// walks away from it and the original code leaked the duplicate.
	PTSTR pParse=_tcsdup(psMusic);
	PTSTR p=pParse;
	TCHAR ch;
	BYTE note=0;
	BYTE prevNote=0;
	DWORD vel=0;
	DWORD prevVel=0;
	// NOTE(review): fixed 10000-byte scratch track with no overflow check --
	// a long enough notation overruns it; confirm an upper bound or grow it.
	BYTE *pMusic=new BYTE[10000];
	memset(pMusic,0,10000);
	BYTE *pData=pMusic;
	BYTE *pTrack[16];
	BYTE channel=0;

	while(1)
	{
		ch = *p;
		if( ch=='\0' || ch==';')
		{
			// BUGFIX: pTrack has only 16 slots; stop parsing rather than
			// overrun the stack array when the notation names more tracks.
			if(channel >= 16)
				break;

			// Write track data
			// Stop prev note
			WriteVarLen(&pData, prevVel);
			WriteFixLen(&pData, NOTE_OFF | channel, 1);
			WriteFixLen(&pData, prevNote, 1);
			WriteFixLen(&pData, 0x0, 1);
			
			// Write Trailing "FF 2F 00" to indicate the end of track
			WriteFixLen(&pData, 0x00FF2F00, 4);

			// Package the finished track as [4-byte length][data], the
			// layout PlaySegment expects.
			DWORD dwCb=(DWORD)(pData-pMusic);
			pTrack[channel]=new BYTE[dwCb+4];
			BYTE* pbTemp=pTrack[channel];
			WriteFixLen(&pbTemp, dwCb, 4);
			memcpy(pbTemp, pMusic, dwCb);

			// Reset per-track state and move to the next channel.
			note=0;
			prevNote=0;
			vel=0;
			prevVel=0;
			memset(pMusic,0,10000);
			pData=pMusic;
			channel++;

			if(ch=='\0')
				break;
			
			// Continue parsing
			p++;	
			// omit optional space
			ch=*p;
			while(ch==' ')
			{
				p++;
				ch=*p;
			}
		}
		
		//////////////////
		// Command
		if(ch == '@')
		{
			p++;
			BYTE bMsg;
			char t[10]="";
			char* pEnd;
			BYTE data1;
			
			WriteVarLen(&pData, prevVel);		// Velocity
			
			switch(*p)
			{
			case 'I':	// @Ixx : program change, xx = hex patch number
				bMsg=PROGRAM_CHANGE | channel;
				p++;

				// Get the program number
				t[0]=*p;
				p++;
				t[1]=*p;
				p++;
				t[2]='\0';
				pEnd=t+2;
				data1=(BYTE)strtol(t, &pEnd, 16);

				// optional space
				ch=*p;
				while(ch==' ')
				{
					p++;
					ch=*p;
				}
				
				WriteFixLen(&pData, bMsg, 1);
				WriteFixLen(&pData, data1, 1);
				break;
			default:
				break;
			}

			prevVel=0;
			continue;
		}



		//////////////////
		// MIDI note
		// [b|#]  accidentals adjust the semitone offset
		note=0;
		while(ch=='b' || ch=='#')
		{
			if(ch == 'b')
				note--;
			if(ch == '#')
				note++;
			p++;
			ch=*p;
		}
		// {note}
		switch(*p)
		{
		case 'C':
			note+=0;
			break;
		case 'D':
			note+=2;
			break;
		case 'E':
			note+=4;
			break;
		case 'F':
			note+=5;
			break;
		case 'G':
			note+=7;
			break;
		case 'A':
			note+=9;
			break;
		case 'B':
			note+=11;
			break;
		case 'N':	// Nx : raw note number taken from the next character
			{
				p++;
				note=*p;
				// NOTE(review): this path leaves p on the note byte, so the
				// velocity digit below re-reads that same character --
				// confirm the intended 'N' notation layout.
				goto END_OF_NOTE;
			}
			break;
		default:
			note=0;
			break;
		}
		p++;
		note+=24+(*p-'1')*12;	// note group. Note range : 24-119(C1-B8)
		p++;
END_OF_NOTE:

		//{velocity} -- actually the duration in ticks: whole note / 2^digit
		vel=(DWORD)(pEarcon->nTicksPerQN*4/pow(2, (*p - '0')));	// !!
		p++;

		// Dotted note: each dot extends the duration by half the previous extension
		ch=*p;
		int cent=1;
		while(ch=='.')
		{
			vel += vel/pow(2, cent);	
			p++;
			cent++;
			ch=*p;
		}

		// optional space
		ch=*p;
		while(ch==' ')
		{
			p++;
			ch=*p;
		}

		// Write music data
		WriteVarLen(&pData, prevVel);		// Velocity
			
		if(prevNote)
		{
			// Stop prev note
			WriteFixLen(&pData, NOTE_OFF | channel, 1);
			WriteFixLen(&pData, prevNote, 1);
			WriteFixLen(&pData, 0x0, 1);
			// Play another note	
			WriteFixLen(&pData, 0x0, 1);		
		}
		WriteFixLen(&pData, NOTE_ON | channel, 1);
		WriteFixLen(&pData, note, 1);
		WriteFixLen(&pData, 0x7f, 1);
		prevVel=vel;
		prevNote=note;
	}
	
	PlaySegment(hEarcon, pTrack, channel);
	delete[] pMusic;
	for(int i=0;i<channel;i++)
	{
		delete[] pTrack[i];
	}
	free(pParse);	// BUGFIX: release the _tcsdup'd working copy (was leaked)
	return TRUE;
}

By viewing downloads associated with this article you agree to the Terms of Service and the article's licence.

If a file you wish to view isn't highlighted, and is a text file (not binary), please let us know and we'll add colourisation support for it.

License

This article has no explicit license attached to it but may contain usage terms in the article text or the download files themselves. If in doubt please contact the author via the discussion board below.

A list of licenses authors might use can be found here


Written By
Engineer
China China
A student at Zhejiang University, Zhejiang, China.
Major in Automation.
Now I want to study machine vision and robotics, but I'm really consumed with choices between hardware and software, and between research and engineering.
I'll be glad if you can give some suggestions.

Comments and Discussions