/*
(c) 2005, Marc Clifton
All Rights Reserved
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
Neither the name of Marc Clifton, "Advanced Unit Test", "AUT", nor the names
of its contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
using System;
using System.Collections;
using System.Diagnostics;
using System.Threading;
using Vts.KALib;
namespace Vts.UTCore
{
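// Base class for running the tests in a TestFixture. Concrete runners decide
// how the fixture's tests are executed (RunTests) and whether a thrown
// exception satisfies a test's expectations (ExceptionConfirmed).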
public abstract class TestFixtureRunner
{
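// The fixture whose tests this runner executes.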
protected TestFixture tf;
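// Runs the fixture's tests. How tests are selected and sequenced is up to
// the concrete runner.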
public abstract void RunTests();
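// Returns true if the exception thrown by a test satisfies the test's
// declared expectation, in which case RunTest records the test as passing.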
public abstract bool ExceptionConfirmed(Exception e, TestAttribute ta);
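// Associates the runner with the fixture it will run.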
protected TestFixtureRunner(TestFixture tf)
{
this.tf=tf;
}
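// Runs a single test method, honoring skip/ignore flags, the repeat count and
// repeat delay, expected exceptions, code path coverage, and the min.
// operations/sec and max. memory constraints. Returns the resulting state.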
protected TestAttribute.TestState RunTest(object instance, TestAttribute ta)
{
if ((!ta.SkipTest) && (!ta.IgnoreTest) && (!tf.IgnoreFixture))
{
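// Collects the run time of each repetition. SortableList is assumed to keep
// its entries ordered, so the first and last entries are the best and worst
// times when the average is computed after the repeat loop.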
SortableList runTimeList=new SortableList();
tf.NotifyTestListeners(ta, UTCore.TestEvent.TestStart);
if (TestRunner.bStopASAP)
{
ta.State=TestAttribute.TestState.Ignore;
ta.Result="Test aborted.";
ta.dTime=0;
ta.dMinTime=0;
ta.dMaxTime=0;
}
else
{
Trace.WriteLine("Running "+ta.TestClass.Namespace+"."+ta.TestClass.ToString()+"."+ta.TestMethod.ToString());
for (int i=0; i<ta.TestMethod.RepeatCount; i++)
{
ta.TestMethod.CurrentRep=i;
try
{
ta.Result="";
ta.State=TestAttribute.TestState.Pass; // assume it passes
tf.TestSetUp.Invoke(instance);
tf.MethodCodePathTracker.Clear();
Thread.Sleep(0); // let other threads execute
ta.Invoke(instance);
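// Merge this test's code path hit counts into the fixture-wide tally.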
foreach(DictionaryEntry entry in tf.MethodCodePathTracker)
{
if (tf.FixtureCodePathTracker.Contains(entry.Key))
{
int n=(int)tf.FixtureCodePathTracker[entry.Key];
n+=(int)entry.Value;
tf.FixtureCodePathTracker[entry.Key]=n;
}
else
{
tf.FixtureCodePathTracker.Add(entry.Key, entry.Value);
}
}
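// Verify that the code path this test is declared to exercise actually ran.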
if (ta.CodePath != -1)
{
if (!tf.MethodCodePathTracker.Contains(ta.CodePath))
{
ta.State=TestAttribute.TestState.Fail;
ta.Result="Code path "+ta.TestMethod.CodePath.ToString()+" did not execute.";
}
}
// If we get here, the test did not throw an exception.
// Was it supposed to?
if (ta.ExpectedException.ExceptionType != null)
{
ta.State=TestAttribute.TestState.Fail;
ta.Result="Expected exception "+ta.ExpectedException.ExceptionType.ToString()+" not encountered.";
}
// did we meet the min. operations per second requirement?
else if ( (ta.TestMethod.MinOPS > 0) && (1.0/ta.dTime < ta.TestMethod.MinOPS) )
{
ta.State=TestAttribute.TestState.Fail;
ta.Result="Min. operations/sec not met.";
}
// did we meet the max. memory allowed?
else if ( (ta.TestMethod.MaxK > 0) && (ta.TestMethod.MemoryUsed > ta.TestMethod.MaxK*1000) )
{
ta.State=TestAttribute.TestState.Fail;
ta.Result="Max. memory utilization exceeded.";
}
if (ta.State != TestAttribute.TestState.Pass)
{
// break on first failure
break;
}
}
catch(UnitTest.AssertionException e)
{
if (ExceptionConfirmed(e, ta))
{
ta.State=TestAttribute.TestState.Pass;
}
else
{
ta.State=TestAttribute.TestState.Fail;
ta.Result="Assertion failed: "+e.Message;
break;
}
}
catch(Exception e)
{
if (ExceptionConfirmed(e, ta))
{
ta.State=TestAttribute.TestState.Pass;
}
else
{
Debug.WriteLine(e.Message+"\r\n"+e.StackTrace);
ta.State=TestAttribute.TestState.Fail;
ta.Result="Exception occurred: "+e.Message+" "+e.InnerException;
}
}
finally
{
runTimeList.Add(ta.dTime);
tf.TestTearDown.Invoke(instance);
if (ta.TestMethod.RepeatCount > 1)
{
// only fire this event when we're really running a test repeatedly
tf.NotifyIterationListeners(ta);
}
}
if (TestRunner.bStopASAP)
break;
if (ta.TestMethod.RepeatDelay > 0)
{
Thread.Sleep(ta.TestMethod.RepeatDelay);
}
}
// If all the repetitions succeeded, take the average execution time,
// ignoring the best and worst times. (Skipped when there are fewer
// than 3 samples.)
double total=0;
if (runTimeList.Count > 2)
{
for (int j=1; j<runTimeList.Count-1; j++)
{
total+=(double)runTimeList[j];
}
ta.dTime=(total==0.0 ? ta.dTime : (total /(runTimeList.Count-2)));
ta.dMinTime=(double)runTimeList[0];
ta.dMaxTime=(double)runTimeList[runTimeList.Count-1];
}
runTimeList.Clear();
}
}
else
{
if(ta.SkipTest)
{
ta.State=TestAttribute.TestState.NotRun;
}
else if(ta.IgnoreTest)
{
ta.Result=ta.IgnoreReason;
ta.State=TestAttribute.TestState.Ignore;
}
else if (tf.IgnoreFixture)
{
ta.Result=tf.IgnoreReason;
ta.State=TestAttribute.TestState.Ignore;
}
}
tf.NotifyTestListeners(ta,TestEvent.TestDone);
return ta.State;
}
}
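// Illustrative sketch (hypothetical, not part of the library): a minimal
// concrete runner. The subclass name, the CreateFixtureInstance and
// GetFixtureTests helpers, and the exception-matching rule are all
// assumptions for illustration; only the members used elsewhere in this
// file are taken from the real API.
//
// public class SequentialTestFixtureRunner : TestFixtureRunner
// {
//     public SequentialTestFixtureRunner(TestFixture tf) : base(tf)
//     {
//     }
//
//     public override bool ExceptionConfirmed(Exception e, TestAttribute ta)
//     {
//         // Pass when the thrown exception is of, or derives from, the
//         // declared expected type (hypothetical matching rule).
//         return (ta.ExpectedException.ExceptionType != null) &&
//             ta.ExpectedException.ExceptionType.IsInstanceOfType(e);
//     }
//
//     public override void RunTests()
//     {
//         // CreateFixtureInstance() and GetFixtureTests() are hypothetical
//         // stand-ins for the fixture-specific setup and test enumeration.
//         object instance = CreateFixtureInstance();
//         foreach (TestAttribute ta in GetFixtureTests())
//         {
//             RunTest(instance, ta);
//         }
//     }
// }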
}