#include <celero/Celero.h>
#include <celero/Console.h>
#include <celero/BenchmarkInfo.h>
#include <celero/TestVector.h>
#include <celero/Utilities.h>
#include <celero/Executor.h>
#include <celero/Print.h>
#include <celero/ResultTable.h>
#include <functional>
#include <iostream>
using namespace celero;
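// Execute a single benchmark: run the test for the requested number of samples
// (or auto-sample when no sample count was given) and record the timing results.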
void executor::Execute(std::shared_ptr<BenchmarkInfo> x)
{
    // Define a small function object used to execute the tests uniformly.
    std::function<void(const size_t)> testRunner = [x](const size_t problemSetValueIndex)
    {
        auto test = x->getFactory()->Create();
        const auto testTime = test->Run(x->getOps(), problemSetValueIndex);

        // Save the test results.
        x->setRunTime(testTime.first);
        x->incrementTotalRunTime(testTime.first);
    };

    // Create an instance of the test so the problem set's value can be queried later.
    auto temp = x->getFactory()->Create();
    const size_t problemSetIndex = x->getProblemSetSizeIndex();

    if(x->getSamples() > 0)
    {
        // A fixed number of samples was requested; run exactly that many.
        for(auto j = x->getSamples(); j > 0; --j)
        {
            testRunner(problemSetIndex);
        }
    }
    else
    {
        // JEF // Run for at least one second and at least 30 iterations for good statistical sampling.
        while((x->getTotalRunTime() < celero::UsPerSec) || (x->getSamples() < celero::StatisticalSample))
        {
            x->incrementSamples();
            testRunner(problemSetIndex);
        }

        print::Auto(x);
    }

    ResultTable::Instance().add(x->getGroupName(), x->getTestName(), temp->getProblemSetValue(problemSetIndex), x->getUsPerOp());
}
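// Top-level driver: alternate between baselines and benchmarks until every
// problem set size has been measured, then print the summary table.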
void executor::RunAll()
{
    bool moreProblemSetsLeft = true;

    while(moreProblemSetsLeft)
    {
        moreProblemSetsLeft = RunAllBaselines();
        moreProblemSetsLeft |= RunAllTests();

        // Reset all baseline data.
        celero::TestVector::Instance().forEachBaseline(
            [](std::shared_ptr<BenchmarkInfo> info)
            {
                info->reset();
            });

        // Reset all benchmark data.
        celero::TestVector::Instance().forEachTest(
            [](std::shared_ptr<BenchmarkInfo> info)
            {
                info->reset();
            });
    }

    ResultTable::Instance().print();
}
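// Run one problem set size for every registered baseline. Returns true while
// any baseline still has problem set sizes left to measure.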
bool executor::RunAllBaselines()
{
    print::StageBanner("Baselining");

    bool moreProblemSetsLeft = false;

    // Run through all of the baselines in ascending order.
    celero::TestVector::Instance().forEachBaseline(
        [&moreProblemSetsLeft](std::shared_ptr<BenchmarkInfo> info)
        {
            // Run if problem set sizes remain, or at least once when none are defined.
            if(info->getProblemSetSizeIndex() < info->getProblemSetSize() || info->getProblemSetSizeIndex() == 0)
            {
                // Describe the beginning of the run.
                print::Run(info);

                Execute(info);

                // Describe the end of the run.
                print::Done(info);

                info->setBaselineUnit(info->getRunTime());
                info->incrementProblemSetSizeIndex();

                moreProblemSetsLeft |= (info->getProblemSetSizeIndex() < info->getProblemSetSize());
            }
        });

    return moreProblemSetsLeft;
}
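// Run one problem set size for every registered benchmark and report each
// result against its baseline. Returns true while any test still has problem
// set sizes left to measure.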
bool executor::RunAllTests()
{
    print::StageBanner("Benchmarking");

    bool moreProblemSetsLeft = false;

    // Run through all the tests in ascending order.
    celero::TestVector::Instance().forEachTest(
        [&moreProblemSetsLeft](std::shared_ptr<BenchmarkInfo> info)
        {
            if(info->getProblemSetSizeIndex() < info->getProblemSetSize() || info->getProblemSetSizeIndex() == 0)
            {
                // Describe the beginning of the run.
                print::Run(info);

                Execute(info);

                // Describe the end of the run.
                print::Done(info);
                print::Baseline(info);

                info->incrementProblemSetSizeIndex();
                moreProblemSetsLeft |= (info->getProblemSetSizeIndex() < info->getProblemSetSize());
            }
        });

    return moreProblemSetsLeft;
}
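// Targeted entry points for running a single group, baseline, or test by name.
// Aside from RunGroup's baseline pass, these remain unimplemented stubs.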
void executor::RunGroup(const std::string& x)
{
    executor::RunBaseline(x);

    // Run tests...
}

void executor::Run(const std::string&, const std::string&)
{
}

void executor::RunBaseline(const std::string&)
{
}

void executor::RunTest(const std::string&, const std::string&)
{
}
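For context, here is a minimal sketch of how the executor above is driven from user code. It assumes Celero's CELERO_MAIN, BASELINE, and BENCHMARK macros and the celero::DoNotOptimizeAway utility; the group name, test names, and iteration counts are illustrative only. Passing zero for the sample count sends each test through the auto-sampling branch of Execute, which keeps running until at least one second of total run time and thirty samples have accumulated.

#include <celero/Celero.h>
#include <cmath>

CELERO_MAIN

// Baseline: every other result in the group is reported relative to this.
BASELINE(DemoGroup, Baseline, 0, 100000)
{
    celero::DoNotOptimizeAway(static_cast<float>(std::sin(3.14159265)));
}

// Benchmark: shown as a multiple of the baseline's time per operation.
BENCHMARK(DemoGroup, FmodSin, 0, 100000)
{
    celero::DoNotOptimizeAway(static_cast<float>(std::sin(std::fmod(123.456, 3.14159265))));
}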