Fixed segmentation fault during ipc_client library uninitialization

The fault occurred when the destructor of the RemoteEndPoint map was
executed before the library destructor.
commit 572284e80f
parent 5862e5062b
Author: Juan Carlos Luciani
Date:   2008-01-14 20:05:36 +00:00
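
Background for the fix: a file-scope C++ object such as the old repMap is
destroyed during static teardown, and destruction order across translation
units is unspecified, so cleanup code that still touches the map can run
after the map is already gone. Replacing the object with a plain pointer
(which has no destructor) and creating/deleting the map explicitly in
IpcClientInit()/IpcClientShutdown() puts its lifetime fully under the
library's control. A minimal sketch of the idea, with stand-in names
(libInit, libShutdown, and the bare RemoteEndPoint struct are illustrative,
not the library's actual code):

    #include <map>
    #include <stdint.h>

    struct RemoteEndPoint { /* stand-in for the real endpoint class */ };

    // Before: a global object, destroyed at an unspecified point during
    // static teardown.
    // std::map<uint32_t, RemoteEndPoint*> repMap;

    // After: a plain pointer; nothing is torn down behind the library's back.
    static std::map<uint32_t, RemoteEndPoint*> *g_pRepMap = NULL;

    void libInit()     { g_pRepMap = new std::map<uint32_t, RemoteEndPoint*>(); }
    void libShutdown() { delete g_pRepMap; g_pRepMap = NULL; }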


@@ -58,20 +58,20 @@ int DebugLevel = 0;
 bool UseSyslog = false;
 
 // Application Name for logging purposes
-char unInitialized[] = "Uninitialized";
+static char unInitialized[] = "Uninitialized";
 char *pAppName = unInitialized;
 
 // Application threaded information
-bool appMultithreaded;
+static bool appMultithreaded;
 
 // Client mutex
-pthread_mutex_t clientMutex;
+static pthread_mutex_t clientMutex;
 
 // Mutex for interlocked operations
 pthread_mutex_t interlockedMutex;
 
 // Indicators
-bool svcInitialized = false;
+static bool svcInitialized = false;
 
 // Map of open remote endpoints.
 //
@@ -83,10 +83,10 @@ bool svcInitialized = false;
 typedef map<uint32_t, SmartPtr<RemoteEndPoint>*> REPMap;
 typedef REPMap::iterator REPMapIter;
 typedef pair<REPMapIter, bool> REPIterBoolPair;
-REPMap repMap;
+static REPMap *g_pRepMap = NULL;
 
 // RemoteEndPoint handle allocator
-uint32_t remoteEndPointHandleAllocator = 1;
+static uint32_t remoteEndPointHandleAllocator = 1;
 
 
 //++=======================================================================
@@ -159,7 +159,7 @@ IpcClientOpenUnixRemoteEndPoint(
     {
         // Insert the new RemoteEndPoint into the REP map
         REPIterBoolPair insertResult;
-        insertResult = repMap.insert(make_pair(handle, pSmartRemoteEndPoint));
+        insertResult = g_pRepMap->insert(make_pair(handle, pSmartRemoteEndPoint));
         if (!insertResult.second)
         {
             // Insertion failed
@@ -275,7 +275,7 @@ IpcClientOpenInetRemoteEndPoint(
     {
         // Insert the new RemoteEndPoint into the REP map
         REPIterBoolPair insertResult;
-        insertResult = repMap.insert(make_pair(handle, pSmartRemoteEndPoint));
+        insertResult = g_pRepMap->insert(make_pair(handle, pSmartRemoteEndPoint));
         if (!insertResult.second)
         {
             // Insertion failed
@@ -352,12 +352,12 @@ IpcClientCloseRemoteEndPoint(
 
     // Find the appropriate RemoteEndPoint object in the REP Map using
     // the handle provided by the caller.
-    REPMapIter iter = repMap.find(endPointHandle);
-    if (iter != repMap.end())
+    REPMapIter iter = g_pRepMap->find(endPointHandle);
+    if (iter != g_pRepMap->end())
     {
         // Object was found in the map, remove it.
         SmartRemoteEndPoint *pSmartRemoteEndPoint = iter->second;
-        repMap.erase(iter);
+        g_pRepMap->erase(iter);
 
         // Release our mutex before deleting the endpoint
         pthread_mutex_unlock(&clientMutex);
@@ -452,8 +452,8 @@ IpcClientSubmitReq(
 
     // Find the appropriate RemoteEndPoint object in the REP Map using
     // the handle provided by the caller.
-    REPMapIter iter = repMap.find(endPointHandle);
-    if (iter != repMap.end())
+    REPMapIter iter = g_pRepMap->find(endPointHandle);
+    if (iter != g_pRepMap->end())
     {
         // Object was found in the map, use it to submit
         // the request.
@@ -543,6 +543,7 @@ IpcClientInit(
     // Verify that we have not been initialized already
     if (!svcInitialized)
     {
+        try {
         // Save a copy of the application name
         pAppName = new char[strlen(pName) + 1];
         if (pAppName == NULL)
@@ -552,6 +553,9 @@ IpcClientInit(
         }
         strcpy(pAppName, pName);
 
+        // Create our Remote Endpoint Map
+        g_pRepMap = new REPMap();
+
         // Save the app multithreaded information
         appMultithreaded = multithreaded;
 
@@ -566,6 +570,10 @@ IpcClientInit(
         // Success
         svcInitialized = true;
         retStatus = 0;
+
+        } catch (...) {
+            DbgTrace(0, "IpcClientInit- Exception caught\n", 0);
+        }
     }
     else
     {
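
Why the guard was added: a standard (non-nothrow) operator new reports
failure by throwing std::bad_alloc rather than returning NULL, so the
pre-existing NULL check on pAppName cannot fire by itself; the new
try/catch is what actually intercepts an allocation failure (from the
pAppName buffer or the new REPMap). A small illustration, not library code:

    #include <new>
    #include <cstdio>
    #include <cstddef>

    int main()
    {
        try {
            // Default new[] throws on failure; it does not return NULL.
            char *p = new char[~(std::size_t)0 / 2];
            delete[] p;
        } catch (const std::bad_alloc &) {
            std::printf("allocation failure arrives as an exception\n");
        }
        return 0;
    }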
@@ -606,11 +614,11 @@ IpcClientShutdown(void)
         // Clean up the REP map
         pthread_mutex_lock(&clientMutex);
 
-        while (!repMap.empty())
+        while (!g_pRepMap->empty())
         {
-            REPMapIter iter = repMap.begin();
+            REPMapIter iter = g_pRepMap->begin();
             SmartRemoteEndPoint *pSmartRemoteEndPoint = iter->second;
-            repMap.erase(iter);
+            g_pRepMap->erase(iter);
 
             pthread_mutex_unlock(&clientMutex);
             delete pSmartRemoteEndPoint;
@@ -629,6 +637,10 @@ IpcClientShutdown(void)
         while (numCChannelObjects)
             sleep(0); // Only suffer a time-slice
         sleep(0);
+
+        // Delete the Remote Endpoint Map
+        delete(g_pRepMap);
+        g_pRepMap = NULL;
     }
     else
     {
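
Taken together, the two IpcClientShutdown hunks drain the map one entry at
a time, dropping clientMutex around each delete so endpoint teardown never
runs under the lock, and only then free the map itself. A condensed,
self-contained sketch of that drain pattern (the re-lock at the top of each
iteration lies outside the hunks shown above, so it is inferred):

    #include <map>
    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static std::map<int, int*> *pMap = NULL; // stand-in for g_pRepMap

    void drainAndDelete()
    {
        pthread_mutex_lock(&lock);
        while (!pMap->empty())
        {
            std::map<int, int*>::iterator it = pMap->begin();
            int *pEntry = it->second;
            pMap->erase(it);                   // unlink while holding the lock
            pthread_mutex_unlock(&lock);       // entry teardown runs unlocked
            delete pEntry;
            pthread_mutex_lock(&lock);         // re-acquire before next check
        }
        pthread_mutex_unlock(&lock);

        delete pMap;                           // safe now: the map is empty
        pMap = NULL;
    }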