author    Johnny Willemsen <jwillemsen@remedy.nl>    2017-01-03 15:36:54 +0100
committer Johnny Willemsen <jwillemsen@remedy.nl>    2017-01-03 15:36:54 +0100
commit    6f66470f609a4aba63f95e637261859892bce458 (patch)
tree      acc9ad8eab80442afbee64e4acb0f362d00430f6 /TAO/tao/Wait_On_Read.cpp
parent    02052a420dea4dafaa7134e02653aa42a63d41fc (diff)
download  ATCD-6f66470f609a4aba63f95e637261859892bce458.tar.gz
The successful/error_detected/keep_waiting accessors of TAO_LF_Event are also called from outside the leader/follower loop by other threads, which could trigger a data race because an inconsistent state is read. Moved the real logic to _i methods; the public accessors now first acquire the LF lock. This normally happens only during connection setup, so it doesn't add a penalty to the regular invocation path.
* TAO/orbsvcs/orbsvcs/SSLIOP/IIOP_SSL_Connector.cpp:
* TAO/orbsvcs/orbsvcs/SSLIOP/SSLIOP_Connector.cpp:
* TAO/tao/IIOP_Connector.cpp:
* TAO/tao/LF_CH_Event.cpp:
* TAO/tao/LF_CH_Event.h:
* TAO/tao/LF_Connect_Strategy.cpp:
* TAO/tao/LF_Event.cpp:
* TAO/tao/LF_Event.h:
* TAO/tao/LF_Event.inl:
* TAO/tao/LF_Invocation_Event.cpp:
* TAO/tao/LF_Invocation_Event.h:
* TAO/tao/LF_Multi_Event.cpp:
* TAO/tao/LF_Multi_Event.h:
* TAO/tao/Leader_Follower.cpp:
* TAO/tao/Reactive_Connect_Strategy.cpp:
* TAO/tao/Strategies/SCIOP_Connector.cpp:
* TAO/tao/Strategies/SHMIOP_Connector.cpp:
* TAO/tao/Strategies/UIOP_Connector.cpp:
* TAO/tao/Transport_Connector.cpp:
* TAO/tao/Wait_On_Reactor.cpp:
* TAO/tao/Wait_On_Read.cpp:
* TAO/tests/Bug_3531b_Regression/server.cpp:
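The commit message describes a lock-then-delegate pattern: each public state accessor first acquires the leader/follower lock and then calls a private *_i method that performs the actual check. Below is a minimal sketch of that pattern in plain C++; the class shapes, the lock() accessor, and the LFS_* values are illustrative assumptions, not the actual TAO declarations.

// Hypothetical sketch of the pattern described above, using std::mutex
// instead of the ACE/TAO synchronization types.
#include <mutex>

class Leader_Follower
{
public:
  std::mutex & lock () { return this->lock_; }

private:
  std::mutex lock_;
};

class LF_Event
{
public:
  enum State { LFS_IDLE, LFS_ACTIVE, LFS_SUCCESS, LFS_FAILURE };

  // Public accessor: take the leader/follower lock so that threads
  // outside the event loop never read a half-updated state.
  bool successful (Leader_Follower &lf) const
  {
    std::lock_guard<std::mutex> guard (lf.lock ());
    return this->successful_i ();
  }

private:
  // The real check; code that already holds the lock (the leader
  // thread driving the event loop) can call this directly.
  bool successful_i () const
  {
    return this->state_ == LFS_SUCCESS;
  }

  State state_ = LFS_IDLE;
};

The diff below shows the caller side of the same change: TAO_Wait_On_Read::wait() now caches the leader/follower reference once and passes it into keep_waiting(), error_detected() and successful().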
Diffstat (limited to 'TAO/tao/Wait_On_Read.cpp')
-rw-r--r--  TAO/tao/Wait_On_Read.cpp  14
1 file changed, 8 insertions, 6 deletions
diff --git a/TAO/tao/Wait_On_Read.cpp b/TAO/tao/Wait_On_Read.cpp
index 43176403909..f2cc92134e3 100644
--- a/TAO/tao/Wait_On_Read.cpp
+++ b/TAO/tao/Wait_On_Read.cpp
@@ -81,8 +81,10 @@ TAO_Wait_On_Read::wait (ACE_Time_Value * max_wait_time,
// method.
TAO::ORB_Countdown_Time countdown (max_wait_time);
- rd.state_changed (TAO_LF_Event::LFS_ACTIVE,
- this->transport_->orb_core ()->leader_follower ());
+ TAO_Leader_Follower &leader_follower =
+ this->transport_->orb_core ()->leader_follower ();
+
+ rd.state_changed (TAO_LF_Event::LFS_ACTIVE, leader_follower);
// Do the same sort of looping that is done in other wait
// strategies.
@@ -94,7 +96,7 @@ TAO_Wait_On_Read::wait (ACE_Time_Value * max_wait_time,
// If we got our reply, no need to run the loop any
// further.
- if (!rd.keep_waiting ())
+ if (!rd.keep_waiting (leader_follower))
break;
// @@ We are not checking for timeouts here...
@@ -104,12 +106,12 @@ TAO_Wait_On_Read::wait (ACE_Time_Value * max_wait_time,
break;
}
- if (rd.error_detected () == -1 || retval == -1)
+ if (rd.error_detected (leader_follower) || retval == -1)
{
this->transport_->close_connection ();
}
- if (rd.successful ())
+ if (rd.successful (leader_follower))
{
TAO_ORB_Core * const oc =
this->transport_->orb_core ();
@@ -147,7 +149,7 @@ TAO_Wait_On_Read::wait (ACE_Time_Value * max_wait_time,
return 0;
}
- if (rd.error_detected ())
+ if (rd.error_detected (leader_follower))
return -1;
return 1;