
RCF - Interprocess Communication for C++

Jarl Lindrud, 25 Oct 2011
A server/client IPC framework, using the C++ preprocessor as an IDL compiler.
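To give a flavour of the approach, here is a minimal sketch of an RCF interface, server and client. It is not taken from the article's own samples: the interface name, servant class and port number are invented for illustration, and the exact bind()/endpoint syntax varies a little between RCF releases (older releases use the (Interface *) 0 pointer idiom seen in the test code further down).

#include <RCF/Idl.hpp>
#include <RCF/RcfServer.hpp>
#include <RCF/TcpEndpoint.hpp>

// The interface is described with preprocessor macros rather than a separate
// IDL file; the macros expand into the client stub (RcfClient<I_Calc>) and
// the server-side dispatch code.
RCF_BEGIN(I_Calc, "I_Calc")
    RCF_METHOD_R2(int, add, int, int)
RCF_END(I_Calc)

// Server-side implementation ("servant") of I_Calc.
class Calc
{
public:
    int add(int a, int b) { return a + b; }
};

int main()
{
    // Server: expose a Calc instance over TCP (port number is arbitrary).
    Calc calc;
    RCF::RcfServer server( RCF::TcpEndpoint(50001) );
    server.bind( (I_Calc *) 0, calc);
    server.start();

    // Client: the generated RcfClient<I_Calc> makes the remote call look local.
    RcfClient<I_Calc> client( RCF::TcpEndpoint("localhost", 50001) );
    int sum = client.add(2, 3);    // sum == 5, computed in the server process

    server.stop();
    return 0;
}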
Downloads:
rcf-04.zip (RCF 0.4: demo, include, src, test, and generated documentation)
rcf-09c.zip (RCF 0.9c: demo, include, src, and test)
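
Below is one of the publish/subscribe tests from the test suite included in the downloads above (the Test_Notification test). It exercises the PublishingService and SubscriptionService over each available transport: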
// uncomment to enable VLD leak detection - will automatically link to required libs
//#include "vld.h"
//#include "vldapi.h"

#include <vector>

#include <RCF/test/TestMinimal.hpp>

#include <RCF/Idl.hpp>
#include <RCF/RcfServer.hpp>
#include <RCF/PublishingService.hpp>
#include <RCF/SubscriptionService.hpp>
#include <RCF/test/TransportFactories.hpp>
#include <RCF/util/CommandLine.hpp>
#include <RCF/util/Profile.hpp>
#include <RCF/util/Platform/OS/Sleep.hpp>

#include <RCF/test/ThreadGroup.hpp>

// Some weird problems on Linux, possibly emanating from within asio: something is raising SIGABRT, so ignore that signal here.
#ifdef __linux__
#include <signal.h>
#include <RCF/util/InitDeinit.hpp>
UTIL_ON_INIT( sigignore(SIGABRT) )
#endif

namespace Test_Notification {

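    // I_Events is the notification interface published by the publisher and
    // implemented by each subscriber. The RCF_BEGIN/RCF_METHOD_*/RCF_END
    // macros expand into the client stub (RcfClient<I_Events>) and the
    // server-side dispatch code, so no separate IDL compiler is needed.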
    RCF_BEGIN(I_Events, "I_Events")
        RCF_METHOD_V0(void, onA)
        RCF_METHOD_V1(void, onB, int)
        RCF_METHOD_V2(void, onC, int, const std::string &)
    RCF_END(I_Events)

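    // Subscriber-side implementation of I_Events; it simply counts how many
    // times each notification has been received.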
    class Events
    {
    public:

        Events() : nA(RCF_DEFAULT_INIT), nB(RCF_DEFAULT_INIT), nC(RCF_DEFAULT_INIT)
        {}

        void onA()
        {
            nA++;
        }

        void onB(int n)
        {
            nB++;
        }

        void onC(int n, std::string s)
        {
            nC++;
        }

        int nA;
        int nB;
        int nC;

    };

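    // Publisher thread: opens the I_Events publishing endpoint and publishes
    // onA() notifications in a tight loop until the stop flag is set.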
    class ServerTask
    {
    public:
        ServerTask(
            RCF::PublishingService &publishingService,
            const volatile bool &stopFlag) :
                mPublishingService(publishingService),
                mStopFlag(stopFlag)
        {}

        void operator()()
        {
            try
            {
                //mPublishingService.beginPublish<I_Events>();
                mPublishingService.beginPublish( (I_Events*) 0);
                while (!mStopFlag)
                {
                    //mPublishingService.publish<I_Events>().onA();
                    mPublishingService.publish( (I_Events*) 0).onA();
                }
                mPublishingService.endPublish( (I_Events*) 0);
            }
            catch(const std::exception &e)
            {
                std::cout << "Server task exception" << std::endl;
                std::cout << RCF::toString(e) << std::endl;
            }
        }

    private:
        RCF::PublishingService &mPublishingService;
        const volatile bool &mStopFlag;
    };

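    // Subscriber thread: subscribes to and unsubscribes from the publisher
    // 1000 times in a row, to stress subscription setup and teardown.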
    class ClientTask
    {
    public:
        ClientTask(
            RCF::SubscriptionService &subscriptionService,
            RCF::EndpointPtr endpointPtr) :
                mSubscriptionService(subscriptionService),
                mEndpointPtr(endpointPtr)
        {}

        void operator()()
        {
            Events events;

            for (std::size_t n=0; n<1000; ++n)
            {
                try
                {

                    mSubscriptionService.beginSubscribe(
                        (I_Events*) 0,
                        events,
                        *mEndpointPtr,
                        RCF::ClientProgressPtr());

                    mSubscriptionService.endSubscribe(
                        (I_Events*) 0,
                        events);
                }
                catch(const std::exception &e)
                {
                    std::cout << "Client task exception, n=" << n << std::endl;
                    std::cout << RCF::toString(e) << std::endl;
                }
            }
        }

    private:
        RCF::SubscriptionService &mSubscriptionService;
        RCF::EndpointPtr mEndpointPtr;
    };

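    // test1: connect three subscribers, then unsubscribe them one at a time,
    // publishing a round of notifications after each unsubscribe. Subscriber
    // k should end up with exactly k notifications of each type.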
    void test1(
        RCF::PublishingService &publishingService,
        RCF::SubscriptionService &subscriptionService,
        RCF::EndpointPtr publisherClientEndpointPtr,
        std::vector<Events> &events)
    {

        events.clear();
        events.resize(3);

        for (std::size_t j=0; j<3;++j)
        {
            publishingService.beginPublish( (I_Events*) 0);

            for (std::size_t k=0; k<events.size(); ++k)
            {
                events[k] = Events();
                subscriptionService.beginSubscribe(
                    (I_Events *) 0,
                    events[k],
                    *publisherClientEndpointPtr,
                    RCF::ClientProgressPtr());
            }

            // give the server time to set up the subscriptions
            Platform::OS::Sleep(1);

            for (std::size_t k=0; k<events.size(); ++k)
            {
                subscriptionService.endSubscribe( (I_Events*) 0, events[k]);
                publishingService.publish( (I_Events*) 0).onA();
                publishingService.publish( (I_Events*) 0).onB(1);
                publishingService.publish( (I_Events*) 0).onC(1, "one");

                // give the subscribers time to receive the notifications
                Platform::OS::Sleep(1);
            }

            publishingService.endPublish( (I_Events*) 0);

            for (std::size_t k=0; k<events.size(); ++k)
            {
                BOOST_CHECK(events[k].nA == k);
                BOOST_CHECK(events[k].nB == k);
                BOOST_CHECK(events[k].nC == k);
            }

        }


    }

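    // test2: intensive concurrent subscribing and unsubscribing: one
    // publisher thread publishes continuously while several client threads
    // churn subscriptions at the same time.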
    void test2(
        RCF::PublishingService &publishingService,
        RCF::SubscriptionService &subscriptionService,
        RCF::EndpointPtr publisherClientEndpointPtr)
    {
        // intensive subscribing and unsubscribing
/*
#if !defined(BOOST_WINDOWS) || defined(__MINGW32__)
        BOOST_CHECK(1==0 && "Intensive subscribing and unsubscribing doesn't work properly yet");
        return;
#endif
*/
        // now fire off all the threads
        bool serverThreadStopFlag = false;
        Thread serverThread(( ServerTask(
            publishingService,
            boost::cref(serverThreadStopFlag))));

        Platform::OS::SleepMs(1000);

        ThreadGroup clientThreads;
        for (std::size_t j=0; j<2; ++j)
        {
            clientThreads.push_back( ThreadPtr( new Thread( ClientTask(
                subscriptionService,
                publisherClientEndpointPtr->clone()))));
        }
        joinThreadGroup(clientThreads);

        serverThreadStopFlag = true;
        serverThread.join();
    }

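    // Disconnect callback passed to beginSubscribe() in test3; records which
    // subscriptions have reported a disconnection.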
    void onSubscriptionDisconnect(std::vector<bool> &v, std::size_t which)
    {
        RCF_ASSERT(which < v.size());
        RCF_ASSERT(v[which] == false);
        v[which] = true;
    }

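    // test3: subscription disconnect detection. An explicit endSubscribe()
    // removes the subscription entirely, while endPublish() on the publisher
    // side leaves the remaining subscriptions in place but disconnected, and
    // fires their disconnect callbacks.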
    void test3(
        RCF::PublishingService &publishingService,
        RCF::SubscriptionService &subscriptionService,
        RCF::EndpointPtr publisherClientEndpointPtr,
        std::vector<Events> &events)
    {
        events.clear();
        events.resize(10);
        std::vector<bool> disconnectedEvents(10);

        typedef RCF::SubscriptionPtr SubscriptionPtr;
        SubscriptionPtr subscriptionPtr;

        publishingService.beginPublish( (I_Events*) 0);

        for (std::size_t j=0; j<events.size(); ++j)
        {
            // inserting the following line works around a bizarre ICE (internal compiler error) in vc6
            std::vector<bool> vb;

            SubscriptionPtr subscriptionPtr =
                subscriptionService.beginSubscribe(
                    (I_Events*) 0,
                    events[j],
                    *publisherClientEndpointPtr,
                    boost::bind(
                        onSubscriptionDisconnect,
                        boost::ref(disconnectedEvents),
                        j),
                    RCF::ClientProgressPtr());
        }

        // publish to all subscribers
        for (std::size_t j=0; j<events.size(); ++j)
        {
            BOOST_CHECK(events[j].nA == 0);
            BOOST_CHECK(events[j].nB == 0);
            BOOST_CHECK(events[j].nC == 0);
        }

        Platform::OS::Sleep(1);

        publishingService.publish( (I_Events*) 0).onA();
        publishingService.publish( (I_Events*) 0).onB(1);
        publishingService.publish( (I_Events*) 0).onC(1, "one");

        Platform::OS::Sleep(1);

        for (std::size_t j=0; j<events.size(); ++j)
        {
            BOOST_CHECK(events[j].nA == 1);
            BOOST_CHECK(events[j].nB == 1);
            BOOST_CHECK(events[j].nC == 1);
        }


        // check that polling for disconnections works, explicit disconnect

        subscriptionPtr = subscriptionService.getSubscriptionPtr(
            (I_Events*) 0,
            events[0]);

        BOOST_CHECK( subscriptionPtr.get() && subscriptionPtr->isConnected());

        subscriptionService.endSubscribe( (I_Events*) 0, events[0]);

        subscriptionPtr = subscriptionService.getSubscriptionPtr(
            (I_Events*) 0,
            events[0]);

        BOOST_CHECK(!subscriptionPtr);


        // check that polling for disconnections works, implicit disconnect

        for (std::size_t j=1; j<events.size(); ++j)
        {
            subscriptionPtr =
                subscriptionService.getSubscriptionPtr( (I_Events*) 0, events[j]);
            BOOST_CHECK(subscriptionPtr.get() && subscriptionPtr->isConnected());
        }

        publishingService.endPublish( (I_Events*) 0);
        Platform::OS::Sleep(1);

        for (std::size_t j=1; j<events.size(); ++j)
        {
            subscriptionPtr =
                subscriptionService.getSubscriptionPtr( (I_Events*) 0, events[j]);
            BOOST_CHECK(subscriptionPtr.get() && !subscriptionPtr->isConnected());
        }


        // check that we have disconnect notifications on all but first subscription

        BOOST_CHECK(disconnectedEvents[0] == false);
        for (std::size_t j=1; j<disconnectedEvents.size(); ++j)
        {
            BOOST_CHECK(disconnectedEvents[j] == true);
        }

        // have to call endSubscribe() before events[] goes out of scope!
        for (std::size_t j=1; j<events.size(); ++j)
        {
            subscriptionService.endSubscribe( (I_Events*) 0, events[j]);
        }
    }

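    // test4: rough publishing throughput. Connect 100 subscribers, then time
    // the publishing calls (with util::Profile) while unsubscribing half of
    // the subscribers, and finally unsubscribe the rest.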
    void test4(
        RCF::PublishingService &publishingService,
        RCF::SubscriptionService &subscriptionService,
        RCF::EndpointPtr publisherClientEndpointPtr,
        RCF::ServerTransportPtr publisherServerTransportPtr,
        std::vector<Events> &events)
    {
        events.clear();
        events.resize(100);

        for (std::size_t j=0; j<5; ++j)
        {
            publishingService.beginPublish( (I_Events*) 0);

            for (std::size_t k=0; k<events.size(); ++k)
            {
                events[k] = Events();
                subscriptionService.beginSubscribe(
                    (I_Events*) 0,
                    events[k],
                    *publisherClientEndpointPtr,
                    RCF::ClientProgressPtr());
            }

            {
                std::string transportName = typeid(*publisherServerTransportPtr).name();
                util::Profile profile(transportName + ": time spent on publishing calls");
                for (unsigned int k=0; k<events.size()/2; ++k)
                {
                    subscriptionService.endSubscribe( (I_Events*) 0, events[k]);
                    publishingService.publish( (I_Events*) 0).onA();
                    publishingService.publish( (I_Events*) 0).onB(1);
                    publishingService.publish( (I_Events*) 0).onC(1, "one");
                }
            }

            for (std::size_t k=events.size()/2; k<events.size(); ++k)
            {
                subscriptionService.endSubscribe( (I_Events*) 0, events[k]);
            }

            publishingService.endPublish( (I_Events*) 0);
        }
    }

} // namespace Test_Notification

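// Test driver: for each available transport, set up a publisher server
// (listening transport + PublishingService) and a subscriber server
// (non-listening transport + SubscriptionService), start both, and run the
// tests. Transports that don't implement I_ServerTransportEx can't support
// publish/subscribe and are skipped.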
int RCF_TEST_MAIN(int argc, char **argv)
{

    printTestHeader(__FILE__);

    using namespace Test_Notification;

    util::CommandLine::getSingleton().parse(argc, argv);

    for (std::size_t i=0; i<RCF::getTransportFactories().size(); ++i)
    {
        RCF::TransportFactoryPtr transportFactoryPtr;
        std::pair<RCF::ServerTransportPtr, RCF::ClientTransportAutoPtrPtr> transports;

        transportFactoryPtr = RCF::getTransportFactories()[i];
        transports = transportFactoryPtr->createTransports();
        RCF::ServerTransportPtr publisherServerTransportPtr( transports.first );
        RCF::ClientTransportAutoPtr publisherClientTransportAutoPtr( *transports.second );

        transportFactoryPtr = RCF::getTransportFactories()[i];
        transports = transportFactoryPtr->createNonListeningTransports();
        RCF::ServerTransportPtr subscriberServerTransportPtr( transports.first );
        //RCF::ClientTransportAutoPtr subscriberClientTransportAutoPtr( *transports.second );

        RCF::RcfServer publisher(publisherServerTransportPtr);
        RCF::RcfServer subscriber(subscriberServerTransportPtr);

        // need both I_ServerTransportEx and I_ServerTransportSessionFull for
        // publish/subscribe functionality
        RCF::I_ServerTransportEx *pServerTransportEx =
            dynamic_cast<RCF::I_ServerTransportEx *>(&publisher.getServerTransport());

        if (NULL == pServerTransportEx)
        {
            continue;
        }

        RCF::writeTransportTypes(
            std::cout,
            *publisherServerTransportPtr,
            *publisherClientTransportAutoPtr);

        RCF::PublishingServicePtr publishingServicePtr(new RCF::PublishingService);
        RCF::PublishingService &publishingService = *publishingServicePtr;
        publisher.addService(publishingServicePtr);

        RCF::SubscriptionServicePtr subscriptionServicePtr(new RCF::SubscriptionService);
        RCF::SubscriptionService &subscriptionService = *subscriptionServicePtr;
        subscriber.addService(subscriptionServicePtr);

        publisher.start();
        subscriber.start();

        // NB: must call endSubscribe<>(object) before object goes out of scope!
        // ie, object must outlive the subscription. Otherwise we get memory
        // corruption from invoking methods on a dead object.

        // TODO: only allow shared_ptr's for subscriptions?

        RCF::EndpointPtr publisherClientEndpointPtr(
            publisherClientTransportAutoPtr->getEndpointPtr());

        // the events vector is declared here and passed into each test; this arrangement came about while porting to vc6

        std::vector<Events> vec;

        vec.clear();
        test1(
            publishingService,
            subscriptionService,
            publisherClientEndpointPtr,
            vec);

        // TODO: find out why this test goes busy on vc6
#if !defined(_MSC_VER) || _MSC_VER > 1200

        vec.clear();
        test2(
            publishingService,
            subscriptionService,
            publisherClientEndpointPtr);

#endif

        vec.clear();
        test3(
            publishingService,
            subscriptionService,
            publisherClientEndpointPtr,
            vec);

        vec.clear();
        test4(
            publishingService,
            subscriptionService,
            publisherClientEndpointPtr,
            publisherServerTransportPtr,
            vec);

        subscriber.stop();
        publisher.stop();

    }

    return boost::exit_success;
}






License

This article, along with any associated source code and files, is licensed under The Code Project Open License (CPOL)


About the Author

Jarl Lindrud

Australia
Software developer, ex-resident of Sweden and now living in Canberra, Australia, working on distributed C++ applications. Jarl enjoys programming, but prefers skiing and playing table tennis. He derives immense satisfaction from referring to himself in third person.
