author     Adam Mitz <mitza@ociweb.com>          2015-03-04 14:13:40 -0600
committer  Steve Huston <shuston@riverace.com>   2015-03-04 16:18:17 -0500
commit     55fef38cb0e5ea4130f0c9b0549a1e18c1640525 (patch)
tree       60f2805fd3173d35e6d31dd90124c25a252d76c7
parent     6bb9f2ac31d84154852781556b1074575b3fe4f0 (diff)
download   ATCD-55fef38cb0e5ea4130f0c9b0549a1e18c1640525.tar.gz
Merge pull request #23 from mitza-oci/devpoll-resume
Fix to resume and register event handlers after returning -1 from handle_*
-rw-r--r--  ACE/ace/Dev_Poll_Reactor.cpp               17
-rw-r--r--  ACE/tests/.gitignore                      178
-rw-r--r--  ACE/tests/Dev_Poll_Reactor_Echo_Test.cpp  574
-rw-r--r--  ACE/tests/run_test.lst                      1
-rw-r--r--  ACE/tests/tests.mpc                         7
5 files changed, 776 insertions(+), 1 deletion(-)
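
For context on the change below: the Dev_Poll_Reactor fix (and the new echo test) covers the pattern where a handler returns -1 from handle_output() so the reactor removes only the dispatched WRITE registration while the READ registration stays in place. Under ACE_HAS_EVENT_POLL the reactor effectively suspends a handler around the upcall, so after the -1 return it must also resume the handle for the surviving registration. A minimal sketch of that usage pattern, using a hypothetical Write_Once_Handler class that is not part of this commit:

#include "ace/Event_Handler.h"
#include "ace/Reactor.h"
#include "ace/SOCK_Stream.h"
#include <cerrno>

class Write_Once_Handler : public ACE_Event_Handler
{
public:
  explicit Write_Once_Handler (ACE_SOCK_Stream &peer) : peer_ (peer) {}

  virtual ACE_HANDLE get_handle (void) const { return this->peer_.get_handle (); }

  virtual int handle_input (ACE_HANDLE)
  {
    char buffer[4096];
    ssize_t n = this->peer_.recv (buffer, sizeof buffer);
    if (n > 0 || (n == -1 && errno == EWOULDBLOCK))
      return 0;   // keep the READ registration
    return -1;    // peer closed or hard error: remove the READ registration
  }

  virtual int handle_output (ACE_HANDLE)
  {
    static const char msg[] = "ping";
    this->peer_.send (msg, sizeof msg - 1);
    // Returning -1 removes only the dispatched WRITE mask; the READ
    // registration must remain usable, which with the epoll backend
    // requires the resume added in Dev_Poll_Reactor.cpp below.
    return -1;
  }

private:
  ACE_SOCK_Stream &peer_;
};

Registration would use the ordinary ACE_Reactor calls, e.g. reactor.register_handler (&handler, ACE_Event_Handler::READ_MASK) up front and the WRITE mask only while data is queued, which is how the Client and Server classes in the new test operate.
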
diff --git a/ACE/ace/Dev_Poll_Reactor.cpp b/ACE/ace/Dev_Poll_Reactor.cpp
index 81bc9091626..97e9d356b2f 100644
--- a/ACE/ace/Dev_Poll_Reactor.cpp
+++ b/ACE/ace/Dev_Poll_Reactor.cpp
@@ -1320,7 +1320,22 @@ ACE_Dev_Poll_Reactor::dispatch_io_event (Token_Guard &guard)
if (info != 0 && info->event_handler == eh)
{
if (status < 0)
- this->remove_handler_i (handle, disp_mask, grd);
+ {
+ this->remove_handler_i (handle, disp_mask, grd);
+#ifdef ACE_HAS_EVENT_POLL
+          // The epoll-based reactor effectively suspends handlers around
+          // the upcall.  If the handler must be resumed, check that it is
+          // still the same handle/handler combination.
+ if (reactor_resumes_eh)
+ {
+ info = this->handler_rep_.find (handle);
+ if (info != 0 && info->event_handler == eh)
+ {
+ this->resume_handler_i (handle);
+ }
+ }
+#endif /* ACE_HAS_EVENT_POLL */
+ }
}
}
// Scope close handles eh ref count decrement, if needed.
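
The reactor_resumes_eh check above matters because a handler can take over resumption itself. A minimal sketch, assuming a hypothetical Self_Resuming_Handler class (not part of this commit), of how a handler opts out via ACE_Event_Handler::resume_handler(), in which case the automatic resume added here is skipped:

#include "ace/Event_Handler.h"
#include "ace/Reactor.h"

class Self_Resuming_Handler : public ACE_Event_Handler
{
public:
  // Tell the reactor that the application, not the reactor, resumes this
  // handler after an upcall.
  virtual int resume_handler (void)
  {
    return ACE_Event_Handler::ACE_APPLICATION_RESUMES_HANDLER;
  }

  virtual int handle_input (ACE_HANDLE)
  {
    // Hand the work off elsewhere; whoever finishes it later calls
    // this->reactor ()->resume_handler (this) to re-enable events.
    return 0;
  }
};
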
diff --git a/ACE/tests/.gitignore b/ACE/tests/.gitignore
new file mode 100644
index 00000000000..3fb4c3d8a60
--- /dev/null
+++ b/ACE/tests/.gitignore
@@ -0,0 +1,178 @@
+/ACE_Init_Test
+/ACE_Test
+/ARGV_Test
+/Aio_Platform_Test
+/Arg_Shifter_Test
+/Array_Map_Test
+/Atomic_Op_Test
+/Auto_Event_Test
+/Auto_IncDec_Test
+/Barrier_Test
+/Based_Pointer_Test
+/Basic_Types_Test
+/Bound_Ptr_Test
+/Buffer_Stream_Test
+/Bug_1576_Regression_Test
+/Bug_1890_Regression_Test
+/Bug_2368_Regression_Test
+/Bug_2497_Regression_Test
+/Bug_2540_Regression_Test
+/Bug_2653_Regression_Test
+/Bug_2659_Regression_Test
+/Bug_2815_Regression_Test
+/Bug_2820_Regression_Test
+/CDR_Array_Test
+/CDR_File_Test
+/CDR_Test
+/Cache_Map_Manager_Test
+/Cached_Accept_Conn_Test
+/Cached_Allocator_Test
+/Cached_Conn_Test
+/Capabilities_Test
+/Codecs_Test
+/Collection_Test
+/Config_Test
+/Conn_Test
+/DLL_Test
+/DLList_Test
+/Date_Time_Test
+/Dev_Poll_Reactor_Test
+/Dev_Poll_Reactor_Echo_Test
+/Dirent_Test
+/Dynamic_Priority_Test
+/Enum_Interfaces_Test
+/Env_Value_Test
+/FIFO_Test
+/FlReactor_Test
+/Framework_Component_Test
+/Future_Set_Test
+/Future_Test
+/Get_Opt_Test
+/Handle_Set_Test
+/Hash_Map_Bucket_Iterator_Test
+/Hash_Map_Manager_Test
+/Hash_Multi_Map_Manager_Test
+/High_Res_Timer_Test
+/INET_Addr_Test
+/INET_Addr_Test_IPV6
+/IOStream_Test
+/Integer_Truncate_Test
+/Lazy_Map_Manager_Test
+/Log_Msg_Test
+/Logging_Strategy_Test
+/MEM_Stream_Test
+/MM_Shared_Memory_Test
+/MT_Reactor_Timer_Test
+/MT_Reactor_Upcall_Test
+/MT_Reference_Counted_Event_Handler_Test
+/MT_Reference_Counted_Notify_Test
+/MT_SOCK_Test
+/Malloc_Test
+/Manual_Event_Test
+/Map_Manager_Test
+/Map_Test
+/Max_Default_Port_Test
+/Max_Default_Port_Test_IPV6
+/Mem_Map_Test
+/Memcpy_Test
+/Message_Block_Test
+/Message_Queue_Notifications_Test
+/Message_Queue_Test
+/Message_Queue_Test_Ex
+/Multicast_Test
+/Multicast_Test_IPV6
+/Multihomed_INET_Addr_Test
+/Multihomed_INET_Addr_Test_IPV6
+/Naming_Test
+/Network_Adapters_Test
+/New_Fail_Test
+/NonBlocking_Conn_Test
+/Notification_Queue_Unit_Test
+/Notify_Performance_Test
+/OS_Test
+/Object_Manager_Test
+/Obstack_Test
+/OrdMultiSet_Test
+/Pipe_Test
+/Priority_Buffer_Test
+/Priority_Reactor_Test
+/Priority_Task_Test
+/Proactor_Scatter_Gather_Test
+/Proactor_Test
+/Proactor_Test_IPV6
+/Proactor_Timer_Test
+/Process_Manager_Test
+/Process_Manual_Event_Test
+/Process_Mutex_Test
+/Process_Semaphore_Test
+/Process_Strategy_Test
+/QtReactor_Test
+/QtReactor_Test_moc.cpp
+/RB_Tree_Test
+/Reactor_Dispatch_Order_Test
+/Reactor_Exceptions_Test
+/Reactor_Notification_Queue_Test
+/Reactor_Notify_Test
+/Reactor_Performance_Test
+/Reactor_Registration_Test
+/Reactor_Timer_Test
+/Reactors_Test
+/Reader_Writer_Test
+/Recursive_Condition_Bug_Test
+/Recursive_Condition_Test
+/Recursive_Mutex_Test
+/Refcounted_Auto_Ptr_Test
+/Reference_Counted_Event_Handler_Test
+/Reverse_Lock_Test
+/SOCK_Connector_Test
+/SOCK_Dgram_Bcast_Test
+/SOCK_Dgram_Test
+/SOCK_Netlink_Test
+/SOCK_SEQPACK_SCTP_Test
+/SOCK_Send_Recv_Test
+/SOCK_Send_Recv_Test_IPV6
+/SOCK_Test
+/SOCK_Test_IPv6
+/SPIPE_Test
+/SString_Test
+/SV_Shared_Memory_Test
+/Semaphore_Test
+/Sendfile_Test
+/Service_Config_Test
+/Signal_Test
+/Sigset_Ops_Test
+/Simple_Message_Block_Test
+/Svc_Handler_Test
+/TP_Reactor_Test
+/TSS_Static_Test
+/TSS_Test
+/Task_Ex_Test
+/Task_Test
+/Thread_Creation_Threshold_Test
+/Thread_Manager_Test
+/Thread_Mutex_Test
+/Thread_Pool_Reactor_Resume_Test
+/Thread_Pool_Reactor_Test
+/Thread_Pool_Test
+/Time_Service_Test
+/Time_Value_Test
+/Timeprobe_Test
+/Timer_Cancellation_Test
+/Timer_Queue_Reference_Counting_Test
+/Timer_Queue_Test
+/TkReactor_Test
+/Token_Strategy_Test
+/Tokens_Test
+/UPIPE_SAP_Test
+/UUIDTest
+/Unbounded_Set_Test
+/Unbounded_Set_Test_Ex
+/UnloadLibACE
+/Upgradable_RW_Test
+/Vector_Test
+/WFMO_Reactor_Test
+/XtAthenaReactor_Test
+/XtMotifReactor_Test
+/XtReactor_Test
+/test.reg
+/testConfig.ini
diff --git a/ACE/tests/Dev_Poll_Reactor_Echo_Test.cpp b/ACE/tests/Dev_Poll_Reactor_Echo_Test.cpp
new file mode 100644
index 00000000000..04c169234a8
--- /dev/null
+++ b/ACE/tests/Dev_Poll_Reactor_Echo_Test.cpp
@@ -0,0 +1,574 @@
+//=============================================================================
+/**
+ * @file Dev_Poll_Reactor_Echo_Test.cpp
+ *
+ * This test implements a simple echo server using the
+ * Dev_Poll_Reactor. This exercises the reactor the way a typical
+ * client/server application would, i.e., receive a message, then
+ * send a message.
+ * @author Justin Wilson <wilsonj@ociweb.com>
+ */
+//=============================================================================
+
+#include "test_config.h"
+
+#if defined (ACE_HAS_DEV_POLL) || defined (ACE_HAS_EVENT_POLL)
+
+#include "ace/OS_NS_signal.h"
+#include "ace/Reactor.h"
+#include "ace/Dev_Poll_Reactor.h"
+
+#include "ace/Acceptor.h"
+#include "ace/Connector.h"
+
+#include "ace/SOCK_Acceptor.h"
+#include "ace/SOCK_Connector.h"
+#include "ace/SOCK_Stream.h"
+
+#include "ace/OS_NS_unistd.h"
+#include "ace/OS_NS_netdb.h"
+
+#include <queue>
+
+typedef ACE_Svc_Handler<ACE_SOCK_STREAM, ACE_NULL_SYNCH> SVC_HANDLER;
+
+// ----------------------------------------------------
+
+class Client : public SVC_HANDLER
+{
+public:
+
+ Client (void);
+
+ //FUZZ: disable check_for_lack_ACE_OS
+ virtual int open (void * = 0);
+ //FUZZ: enable check_for_lack_ACE_OS
+
+ virtual int handle_output (ACE_HANDLE handle);
+
+ virtual int handle_input (ACE_HANDLE handle);
+
+ virtual int handle_timeout (const ACE_Time_Value &current_time,
+ const void *act);
+
+ virtual int handle_close (ACE_HANDLE handle,
+ ACE_Reactor_Mask mask);
+
+ std::string sent;
+ std::string received;
+
+private:
+ unsigned int call_count_;
+};
+
+
+class Server : public SVC_HANDLER
+{
+public:
+
+ Server (void);
+
+ virtual int handle_input (ACE_HANDLE handle);
+
+ virtual int handle_output (ACE_HANDLE handle);
+
+ virtual int handle_close (ACE_HANDLE handle,
+ ACE_Reactor_Mask mask);
+
+private:
+ int send_i (const char* buffer,
+ size_t size);
+
+ std::queue<std::string*> buffer_list_;
+ size_t offset_;
+};
+
+// ----------------------------------------------------
+
+Client::Client (void)
+ : call_count_ (0)
+{
+}
+
+int
+Client::open (void *)
+{
+ // Trigger writes on a timer.
+ ACE_Time_Value delay (1, 0);
+ ACE_Time_Value restart (1, 0);
+ if (this->reactor ()->schedule_timer (this,
+ 0,
+ delay,
+ restart) == -1)
+ {
+ ACE_ERROR_RETURN ((LM_ERROR,
+ ACE_TEXT ("(%t) %p\n"),
+ ACE_TEXT ("Unable to schedule client side ")
+ ACE_TEXT ("timer in ACE_Dev_Poll_Reactor")),
+ -1);
+ }
+
+ if (this->reactor ()->register_handler (this, ACE_Event_Handler::READ_MASK) == -1)
+ {
+ ACE_ERROR_RETURN ((LM_ERROR,
+ ACE_TEXT ("(%t) %p\n"),
+ ACE_TEXT ("Unable to register for reading ")
+ ACE_TEXT ("in ACE_Dev_Poll_Reactor")),
+ -1);
+ }
+
+ return 0;
+}
+
+int
+Client::handle_output (ACE_HANDLE handle)
+{
+ std::string buffer = "Hello, world!";
+ ssize_t bytes_sent = this->peer ().send (buffer.data (), buffer.size ());
+
+ ACE_DEBUG ((LM_DEBUG,
+ ACE_TEXT ("(%t) Client::handle_output; handle = %d")
+ ACE_TEXT (" bytes sent %d\n"),
+ handle,
+ bytes_sent));
+
+ if (bytes_sent == -1)
+ {
+ if (errno == EWOULDBLOCK)
+ return 0; // Flow control kicked in.
+ else if (errno == EPIPE || errno == ECONNRESET)
+ {
+ ACE_DEBUG ((LM_DEBUG,
+ ACE_TEXT ("(%t) Client::handle_output; server ")
+ ACE_TEXT ("closed handle %d\n"),
+ this->peer ().get_handle ()));
+ return -1;
+ }
+ else
+ ACE_ERROR_RETURN ((LM_ERROR,
+ ACE_TEXT ("(%t) %p\n"),
+ ACE_TEXT ("Client::handle_output")),
+ -1);
+ }
+ else if (bytes_sent == 0)
+ return -1;
+ else
+ this->sent.append (buffer.substr (0, bytes_sent));
+
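+  // Returning -1 tells the reactor to remove the dispatched WRITE mask;
+  // the READ registration from open() must stay usable afterwards.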
+ return -1;
+}
+
+int
+Client::handle_input (ACE_HANDLE handle)
+{
+ for (;;)
+ {
+ char buffer[BUFSIZ];
+ ssize_t bytes_read = this->peer ().recv (buffer, BUFSIZ);
+ ACE_DEBUG ((LM_DEBUG,
+ ACE_TEXT ("(%t) Client::handle_input handle = %d bytes_read = %d\n"),
+ handle, bytes_read));
+
+ if (bytes_read == -1 && errno == EWOULDBLOCK)
+ {
+ return 0;
+ }
+ else if (bytes_read == 0)
+ {
+ // Closed.
+ return -1;
+ }
+ else
+ {
+ this->received.append (buffer, bytes_read);
+ }
+ }
+}
+
+int
+Client::handle_timeout (const ACE_Time_Value &, const void *)
+{
+ ACE_DEBUG ((LM_INFO,
+              ACE_TEXT ("(%t) Expected client timeout occurred at: %T\n")));
+
+ if (this->call_count_ != 10)
+ {
+ // Register for write.
+ if (this->reactor ()->register_handler (this, ACE_Event_Handler::WRITE_MASK) == -1)
+ {
+ ACE_ERROR_RETURN ((LM_ERROR,
+ ACE_TEXT ("(%t) %p\n"),
+ ACE_TEXT ("Unable to register for writing ")
+ ACE_TEXT ("in ACE_Dev_Poll_Reactor")),
+ -1);
+ }
+ this->call_count_++;
+ return 0;
+ }
+ else
+ {
+ // Shutdown.
+ if (this->reactor ()->end_reactor_event_loop () == 0)
+ ACE_DEBUG ((LM_INFO,
+ ACE_TEXT ("(%t) Successful client reactor shutdown.\n")));
+ else
+ ACE_ERROR ((LM_ERROR,
+ ACE_TEXT ("(%t) %p\n"),
+ ACE_TEXT ("Failed client reactor shutdown")));
+
+ // Force this service handler to be closed in either case.
+ return -1;
+ }
+}
+
+int
+Client::handle_close (ACE_HANDLE handle,
+ ACE_Reactor_Mask mask)
+{
+ ACE_DEBUG ((LM_DEBUG,
+ ACE_TEXT ("(%t) Client::handle_close handle = %d mask = %xd\n"), handle, mask));
+ return 0;
+ //return SVC_HANDLER::handle_close (handle, mask);
+}
+
+// ----------------------------------------------------
+
+Server::Server (void)
+ : offset_ (0)
+{
+}
+
+int
+Server::handle_input (ACE_HANDLE handle)
+{
+ for (;;)
+ {
+ char buffer[BUFSIZ];
+ ssize_t bytes_read = this->peer ().recv (buffer, BUFSIZ);
+ ACE_DEBUG ((LM_DEBUG,
+ ACE_TEXT ("(%t) Server::handle_input handle = %d bytes_read = %d\n"),
+ handle, bytes_read));
+
+ if (bytes_read == -1 && errno == EWOULDBLOCK)
+ {
+ ACE_DEBUG ((LM_DEBUG,
+ ACE_TEXT ("(%t) Server::handle_input handle = %d EWOULDBLOCK\n"),
+ handle));
+ return 0;
+ }
+ else if (bytes_read == 0)
+ {
+ // Closed.
+ ACE_DEBUG ((LM_DEBUG,
+ ACE_TEXT ("(%t) Server::handle_input handle = %d CLOSED\n"),
+ handle));
+ return -1;
+ }
+ else
+ {
+ if (send_i (buffer, bytes_read) == -1)
+ return -1;
+ }
+ }
+}
+
+int
+Server::send_i (const char* buffer,
+ size_t size)
+{
+ if (size == 0)
+ {
+ return 0;
+ }
+
+ if (buffer_list_.empty ())
+ {
+ // Register for write.
+ if (this->reactor ()->register_handler (this, ACE_Event_Handler::WRITE_MASK) == -1)
+ {
+ ACE_ERROR_RETURN ((LM_ERROR,
+ ACE_TEXT ("(%t) %p\n"),
+ ACE_TEXT ("Unable to register for writing ")
+ ACE_TEXT ("in ACE_Dev_Poll_Reactor")),
+ -1);
+ }
+ }
+
+ buffer_list_.push (new std::string (buffer, size));
+ return 0;
+}
+
+int
+Server::handle_output (ACE_HANDLE handle)
+{
+ while (!buffer_list_.empty ())
+ {
+ size_t bytes_to_send = buffer_list_.front ()->size () - offset_;
+ ssize_t bytes_sent = this->peer ().send (buffer_list_.front ()->data () + offset_, bytes_to_send);
+
+ ACE_DEBUG ((LM_DEBUG,
+ ACE_TEXT ("(%t) Server::handle_output; handle = %d")
+ ACE_TEXT (" bytes sent %d\n"),
+ handle, bytes_sent));
+
+ if (bytes_sent == -1)
+ {
+ if (errno == EWOULDBLOCK)
+ return 0;
+ else if (errno == EPIPE || errno == ECONNRESET)
+ {
+ ACE_DEBUG ((LM_DEBUG,
+                            ACE_TEXT ("(%t) Server::handle_output; client ")
+ ACE_TEXT ("closed handle %d\n"),
+ this->peer ().get_handle ()));
+ return -1;
+ }
+ else
+ ACE_ERROR_RETURN ((LM_ERROR,
+ ACE_TEXT ("(%t) %p\n"),
+                                 ACE_TEXT ("Server::handle_output")),
+ -1);
+ }
+ else if (bytes_sent == 0)
+ return -1;
+ else
+ {
+ if (bytes_sent == static_cast<ssize_t> (bytes_to_send))
+ {
+ delete buffer_list_.front ();
+ buffer_list_.pop ();
+ offset_ = 0;
+ }
+ else
+ {
+ offset_ += bytes_sent;
+ }
+ }
+ }
+
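+  // All queued data has been sent; return -1 so the reactor removes the
+  // WRITE registration until send_i() re-adds it.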
+ return -1;
+}
+
+int
+Server::handle_close (ACE_HANDLE handle,
+ ACE_Reactor_Mask mask)
+{
+ ACE_DEBUG ((LM_DEBUG,
+ ACE_TEXT ("(%t) Server::handle_close handle = %d mask = %xd\n"), handle, mask));
+ return 0;
+ //return SVC_HANDLER::handle_close (handle, mask);
+}
+
+// ----------------------------------------------------
+
+typedef ACE_Acceptor<Server, ACE_SOCK_ACCEPTOR> ACCEPTOR;
+typedef ACE_Connector<Client, ACE_SOCK_CONNECTOR> CONNECTOR;
+
+// ----------------------------------------------------
+
+class TestAcceptor : public ACCEPTOR
+{
+public:
+
+ virtual int accept_svc_handler (Server * handler)
+ {
+ int result = this->ACCEPTOR::accept_svc_handler (handler);
+
+ if (result != 0)
+ {
+ if (errno != EWOULDBLOCK)
+ ACE_ERROR ((LM_ERROR,
+ ACE_TEXT ("(%t) %p\n"),
+ ACE_TEXT ("Unable to accept connection")));
+
+ return result;
+ }
+
+ ACE_DEBUG ((LM_DEBUG,
+ ACE_TEXT ("(%t) Accepted connection. ")
+ ACE_TEXT ("Stream handle: <%d>\n"),
+ handler->get_handle ()));
+
+ return result;
+ }
+
+};
+
+// ----------------------------------------------------
+
+class TestConnector : public CONNECTOR
+{
+public:
+
+ virtual int connect_svc_handler (
+ CONNECTOR::handler_type *& handler,
+ const CONNECTOR::addr_type &remote_addr,
+ ACE_Time_Value *timeout,
+ const CONNECTOR::addr_type &local_addr,
+ int reuse_addr,
+ int flags,
+ int perms)
+ {
+ const int result = this->CONNECTOR::connect_svc_handler (handler,
+ remote_addr,
+ timeout,
+ local_addr,
+ reuse_addr,
+ flags,
+ perms);
+
+ if (result != 0)
+ return result;
+
+ ACE_TCHAR hostname[MAXHOSTNAMELEN];
+ if (remote_addr.get_host_name (hostname,
+ sizeof (hostname)) != 0)
+ {
+ ACE_ERROR_RETURN ((LM_ERROR,
+ ACE_TEXT ("(%t) %p\n"),
+ ACE_TEXT ("Unable to retrieve hostname")),
+ -1);
+ }
+
+ ACE_DEBUG ((LM_DEBUG,
+ ACE_TEXT ("(%t) Connected to <%s:%d>.\n"),
+ hostname,
+ (int) remote_addr.get_port_number ()));
+
+ return result;
+ }
+
+ virtual int connect_svc_handler (
+ CONNECTOR::handler_type *& handler,
+ CONNECTOR::handler_type *& sh_copy,
+ const CONNECTOR::addr_type &remote_addr,
+ ACE_Time_Value *timeout,
+ const CONNECTOR::addr_type &local_addr,
+ int reuse_addr,
+ int flags,
+ int perms) {
+ sh_copy = handler;
+ return this->connect_svc_handler (handler, remote_addr, timeout,
+ local_addr, reuse_addr, flags,
+ perms);
+ }
+};
+
+// ----------------------------------------------------
+
+static int
+disable_signal (int sigmin, int sigmax)
+{
+#if !defined (ACE_LACKS_UNIX_SIGNALS)
+ sigset_t signal_set;
+  if (ACE_OS::sigemptyset (&signal_set) == -1)
+ ACE_ERROR ((LM_ERROR,
+ ACE_TEXT ("Error: (%P|%t):%p\n"),
+ ACE_TEXT ("sigemptyset failed")));
+
+ for (int i = sigmin; i <= sigmax; i++)
+ ACE_OS::sigaddset (&signal_set, i);
+
+  // Block the signals in <signal_set>.
+# if defined (ACE_LACKS_PTHREAD_THR_SIGSETMASK)
+  // In a multi-threaded application this is not POSIX compliant,
+  // but leave it in just in case.
+ if (ACE_OS::sigprocmask (SIG_BLOCK, &signal_set, 0) != 0)
+# else
+ if (ACE_OS::thr_sigsetmask (SIG_BLOCK, &signal_set, 0) != 0)
+# endif /* ACE_LACKS_PTHREAD_THR_SIGSETMASK */
+ ACE_ERROR_RETURN ((LM_ERROR,
+ ACE_TEXT ("Error: (%P|%t): %p\n"),
+ ACE_TEXT ("SIG_BLOCK failed")),
+ -1);
+#else
+ ACE_UNUSED_ARG (sigmin);
+ ACE_UNUSED_ARG (sigmax);
+#endif /* ACE_LACKS_UNIX_SIGNALS */
+
+ return 0;
+}
+
+// ----------------------------------------------------
+
+int
+run_main (int, ACE_TCHAR *[])
+{
+ ACE_START_TEST (ACE_TEXT ("Dev_Poll_Reactor_Echo_Test"));
+
+ // Make sure we ignore SIGPIPE
+ disable_signal (SIGPIPE, SIGPIPE);
+
+ ACE_Dev_Poll_Reactor dp_reactor;
+ dp_reactor.restart (1); // Restart on EINTR
+ ACE_Reactor reactor (&dp_reactor);
+
+ TestConnector client;
+
+ int flags = 0;
+ ACE_SET_BITS (flags, ACE_NONBLOCK); // Enable non-blocking in the
+ // Svc_Handlers.
+
+ if (client.open (&reactor, flags) != 0)
+ ACE_ERROR_RETURN ((LM_ERROR,
+ ACE_TEXT ("(%t) %p\n"),
+ ACE_TEXT ("Unable to open client service handler")),
+ -1);
+
+ unsigned short port = 54678;
+
+ ACE_INET_Addr addr;
+
+ if (addr.set (port, INADDR_LOOPBACK) != 0)
+ ACE_ERROR_RETURN ((LM_ERROR,
+ ACE_TEXT ("(%t) %p\n"),
+ ACE_TEXT ("server_worker - ACE_INET_Addr::set")),
+ -1);
+
+ TestAcceptor server;
+
+ if (server.open (addr, &reactor, flags) != 0)
+ ACE_ERROR_RETURN ((LM_ERROR,
+ ACE_TEXT ("(%t) %p\n"),
+ ACE_TEXT ("Unable to open server service handler")),
+ -1);
+
+ Client *client_handler = 0;
+
+ if (client.connect (client_handler, addr) != 0)
+ ACE_ERROR_RETURN ((LM_ERROR,
+ ACE_TEXT ("(%t) %p\n"),
+ ACE_TEXT ("Unable to connect to server")),
+ -1);
+
+ if (reactor.run_reactor_event_loop () != 0)
+ ACE_ERROR_RETURN ((LM_ERROR,
+ ACE_TEXT ("(%t) %p\n"),
+ ACE_TEXT ("Error when running client ")
+ ACE_TEXT ("reactor event loop")),
+ -1);
+
+ ACE_DEBUG((LM_DEBUG, "sent: %C\n", client_handler->sent.c_str ()));
+ ACE_DEBUG((LM_DEBUG, "received: %C\n", client_handler->received.c_str ()));
+
+ ACE_TEST_ASSERT (client_handler->sent == client_handler->received);
+
+ ACE_END_TEST;
+
+ return 0;
+}
+
+#else
+
+int
+run_main (int, ACE_TCHAR *[])
+{
+ ACE_START_TEST (ACE_TEXT ("Dev_Poll_Reactor_Echo_Test"));
+ ACE_ERROR ((LM_INFO,
+ ACE_TEXT ("Dev Poll and Event Poll are not supported ")
+ ACE_TEXT ("on this platform\n")));
+ ACE_END_TEST;
+ return 0;
+}
+
+#endif /* ACE_HAS_DEV_POLL || ACE_HAS_EVENT_POLL */
diff --git a/ACE/tests/run_test.lst b/ACE/tests/run_test.lst
index 3d3dee3fc5d..fe41989a294 100644
--- a/ACE/tests/run_test.lst
+++ b/ACE/tests/run_test.lst
@@ -99,6 +99,7 @@ DLL_Test: !STATIC Linux
DLList_Test: !ACE_FOR_TAO
Date_Time_Test: !ACE_FOR_TAO
Dev_Poll_Reactor_Test: !nsk !ST
+Dev_Poll_Reactor_Echo_Test: !nsk !ST
Dirent_Test: !VxWorks_RTP !LabVIEW_RT
Dynamic_Priority_Test
Dynamic_Test
diff --git a/ACE/tests/tests.mpc b/ACE/tests/tests.mpc
index 704010d4130..3ff247f79c0 100644
--- a/ACE/tests/tests.mpc
+++ b/ACE/tests/tests.mpc
@@ -715,6 +715,13 @@ project(Dev Poll Reactor Test) : acetest {
}
}
+project(Dev Poll Reactor Echo Test) : acetest {
+ exename = Dev_Poll_Reactor_Echo_Test
+ Source_Files {
+ Dev_Poll_Reactor_Echo_Test.cpp
+ }
+}
+
project(Dirent Test) : acetest {
exename = Dirent_Test