New upstream version 8.1.0
This commit is contained in:
88
storage/source/components/chunkfetcher/ChunkFetcher.cpp
Normal file
88
storage/source/components/chunkfetcher/ChunkFetcher.cpp
Normal file
@@ -0,0 +1,88 @@
|
||||
#include "ChunkFetcher.h"
|
||||
|
||||
#include <program/Program.h>
|
||||
|
||||
#include <common/Common.h>
|
||||
|
||||
/**
 * Creates one (not yet started) fetcher slave per locally attached storage target.
 */
ChunkFetcher::ChunkFetcher()
   : log("ChunkFetcher")
{
   // one fetcher thread per targetID
   const auto& targets = Program::getApp()->getStorageTargets()->getTargets();

   for (const auto& target : targets)
      this->slaves.emplace_back(target.first);
}
|
||||
|
||||
ChunkFetcher::~ChunkFetcher() = default;
|
||||
|
||||
/**
|
||||
* Start fetcher slaves if they are not running already.
|
||||
*
|
||||
* @return true if successfully started or already running, false if startup problem occurred.
|
||||
*/
|
||||
bool ChunkFetcher::startFetching()
|
||||
{
|
||||
const char* logContext = "ChunkFetcher (start)";
|
||||
bool retVal = true; // false if error occurred
|
||||
|
||||
{
|
||||
const std::lock_guard<Mutex> lock(chunksListMutex);
|
||||
isBad = false;
|
||||
}
|
||||
|
||||
for(ChunkFetcherSlaveListIter iter = slaves.begin(); iter != slaves.end(); iter++)
|
||||
{
|
||||
const std::lock_guard<Mutex> lock(iter->statusMutex);
|
||||
|
||||
if(!iter->isRunning)
|
||||
{
|
||||
// slave thread not running yet => start it
|
||||
iter->resetSelfTerminate();
|
||||
|
||||
try
|
||||
{
|
||||
iter->start();
|
||||
|
||||
iter->isRunning = true;
|
||||
}
|
||||
catch (PThreadCreateException& e)
|
||||
{
|
||||
LogContext(logContext).logErr(std::string("Unable to start thread: ") + e.what());
|
||||
retVal = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return retVal;
|
||||
}
|
||||
|
||||
void ChunkFetcher::stopFetching()
|
||||
{
|
||||
for(ChunkFetcherSlaveListIter iter = slaves.begin(); iter != slaves.end(); iter++)
|
||||
{
|
||||
const std::lock_guard<Mutex> lock(iter->statusMutex);
|
||||
|
||||
if(iter->isRunning)
|
||||
{
|
||||
iter->selfTerminate();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void ChunkFetcher::waitForStopFetching()
|
||||
{
|
||||
for(ChunkFetcherSlaveListIter iter = slaves.begin(); iter != slaves.end(); iter++)
|
||||
{
|
||||
const std::lock_guard<Mutex> lock(iter->statusMutex);
|
||||
|
||||
chunksListFetchedCondition.broadcast();
|
||||
|
||||
while (iter->isRunning)
|
||||
{
|
||||
iter->isRunningChangeCond.wait(&(iter->statusMutex));
|
||||
}
|
||||
|
||||
chunksList.clear();
|
||||
}
|
||||
}
|
||||
101
storage/source/components/chunkfetcher/ChunkFetcher.h
Normal file
101
storage/source/components/chunkfetcher/ChunkFetcher.h
Normal file
@@ -0,0 +1,101 @@
|
||||
#pragma once
|
||||
|
||||
#include <components/chunkfetcher/ChunkFetcherSlave.h>
|
||||
#include <common/toolkit/ListTk.h>
|
||||
|
||||
#include <mutex>
|
||||
|
||||
#define MAX_CHUNKLIST_SIZE 5000
|
||||
|
||||
// forward declaration
|
||||
class ChunkFetcher;
|
||||
|
||||
typedef std::list<ChunkFetcherSlave> ChunkFetcherSlaveList;
|
||||
typedef ChunkFetcherSlaveList::iterator ChunkFetcherSlaveListIter;
|
||||
|
||||
/**
|
||||
* This is not a component that represents a separate thread. Instead, it contains and controls
|
||||
* slave threads, which are started and stopped on request (i.e. they are not automatically started
|
||||
* when the app is started).
|
||||
* The slave threads will run over all chunks on all targets and read them in a format suitable for
|
||||
* fsck
|
||||
*/
|
||||
class ChunkFetcher
|
||||
{
|
||||
public:
|
||||
ChunkFetcher();
|
||||
virtual ~ChunkFetcher();
|
||||
|
||||
bool startFetching();
|
||||
void stopFetching();
|
||||
void waitForStopFetching();
|
||||
|
||||
private:
|
||||
LogContext log;
|
||||
ChunkFetcherSlaveList slaves;
|
||||
|
||||
FsckChunkList chunksList;
|
||||
Mutex chunksListMutex;
|
||||
Condition chunksListFetchedCondition;
|
||||
bool isBad;
|
||||
|
||||
public:
|
||||
bool getIsBad()
|
||||
{
|
||||
const std::lock_guard<Mutex> lock(chunksListMutex);
|
||||
|
||||
return isBad;
|
||||
}
|
||||
|
||||
void setBad()
|
||||
{
|
||||
const std::lock_guard<Mutex> lock(chunksListMutex);
|
||||
|
||||
isBad = true;
|
||||
}
|
||||
|
||||
void addChunk(FsckChunk& chunk)
|
||||
{
|
||||
const std::lock_guard<Mutex> lock(chunksListMutex);
|
||||
|
||||
if (chunksList.size() > MAX_CHUNKLIST_SIZE)
|
||||
chunksListFetchedCondition.wait(&chunksListMutex);
|
||||
|
||||
chunksList.push_back(chunk);
|
||||
}
|
||||
|
||||
bool isQueueEmpty()
|
||||
{
|
||||
std::lock_guard<Mutex> lock(chunksListMutex);
|
||||
return chunksList.empty();
|
||||
}
|
||||
|
||||
|
||||
void getAndDeleteChunks(FsckChunkList& outList, unsigned numChunks)
|
||||
{
|
||||
const std::lock_guard<Mutex> lock(chunksListMutex);
|
||||
|
||||
FsckChunkListIter iterEnd = this->chunksList.begin();
|
||||
ListTk::advance(this->chunksList, iterEnd, numChunks);
|
||||
|
||||
outList.splice(outList.end(), this->chunksList, this->chunksList.begin(), iterEnd);
|
||||
|
||||
chunksListFetchedCondition.signal();
|
||||
}
|
||||
|
||||
unsigned getNumRunning()
|
||||
{
|
||||
unsigned retVal = 0;
|
||||
|
||||
for (ChunkFetcherSlaveListIter iter = slaves.begin(); iter != slaves.end(); iter++)
|
||||
{
|
||||
const std::lock_guard<Mutex> lock(iter->statusMutex);
|
||||
|
||||
if ( iter->isRunning )
|
||||
retVal++;
|
||||
}
|
||||
|
||||
return retVal;
|
||||
}
|
||||
};
|
||||
|
||||
165
storage/source/components/chunkfetcher/ChunkFetcherSlave.cpp
Normal file
165
storage/source/components/chunkfetcher/ChunkFetcherSlave.cpp
Normal file
@@ -0,0 +1,165 @@
|
||||
#include "ChunkFetcherSlave.h"
|
||||
|
||||
#include <program/Program.h>
|
||||
|
||||
#include <boost/static_assert.hpp>
|
||||
#include <libgen.h>
|
||||
|
||||
/**
 * @param targetID ID of the storage target whose chunks this slave will walk; also used
 * to build the thread name and the log context name.
 */
ChunkFetcherSlave::ChunkFetcherSlave(uint16_t targetID)
   : PThread("ChunkFetcherSlave-" + StringTk::uintToStr(targetID)),
     log("ChunkFetcherSlave-" + StringTk::uintToStr(targetID)),
     isRunning(false),
     targetID(targetID)
{
   // nothing to do here; all setup happens in the init list
}
|
||||
|
||||
ChunkFetcherSlave::~ChunkFetcherSlave() = default;
|
||||
|
||||
void ChunkFetcherSlave::run()
|
||||
{
|
||||
setIsRunning(true);
|
||||
|
||||
try
|
||||
{
|
||||
registerSignalHandler();
|
||||
|
||||
walkAllChunks();
|
||||
|
||||
log.log(4, "Component stopped.");
|
||||
}
|
||||
catch(std::exception& e)
|
||||
{
|
||||
PThread::getCurrentThreadApp()->handleComponentException(e);
|
||||
}
|
||||
|
||||
setIsRunning(false);
|
||||
}
|
||||
|
||||
/*
|
||||
* walk over all chunks in that target
|
||||
*/
|
||||
void ChunkFetcherSlave::walkAllChunks()
|
||||
{
|
||||
App* app = Program::getApp();
|
||||
|
||||
log.log(Log_DEBUG, "Starting chunks walk...");
|
||||
|
||||
const auto& target = *app->getStorageTargets()->getTargets().at(targetID);
|
||||
|
||||
const auto& targetPath = target.getPath().str();
|
||||
|
||||
// walk over "normal" chunks (i.e. no mirrors)
|
||||
std::string walkPath = targetPath + "/" + CONFIG_CHUNK_SUBDIR_NAME;
|
||||
if(!walkChunkPath(walkPath, 0, walkPath.size() ) )
|
||||
return;
|
||||
|
||||
// let's find out if this target is part of a buddy mirror group and if it is the primary
|
||||
// target; if it is, walk over buddy mirror directory
|
||||
bool isPrimaryTarget;
|
||||
uint16_t buddyGroupID = app->getMirrorBuddyGroupMapper()->getBuddyGroupID(this->targetID,
|
||||
&isPrimaryTarget);
|
||||
|
||||
if (isPrimaryTarget)
|
||||
{
|
||||
walkPath = targetPath + "/" CONFIG_BUDDYMIRROR_SUBDIR_NAME;
|
||||
if(!walkChunkPath(walkPath, buddyGroupID, walkPath.size() ) )
|
||||
return;
|
||||
}
|
||||
|
||||
log.log(Log_DEBUG, "End of chunks walk.");
|
||||
}
|
||||
|
||||
/**
 * Recursively walk one chunk directory tree and hand every regular file found to the
 * global ChunkFetcher queue as an FsckChunk.
 *
 * @param path directory to walk.
 * @param buddyGroupID 0 for unmirrored chunks, otherwise the buddy group these chunks
 *    belong to.
 * @param basePathLen length of the walk root path; used to derive each chunk's path
 *    relative to that root.
 * @return false on error or when self-termination was requested; in that case the
 *    fetcher is also marked as bad.
 */
bool ChunkFetcherSlave::walkChunkPath(const std::string& path, uint16_t buddyGroupID,
   unsigned basePathLen)
{
   DIR* dir = ::opendir(path.c_str() );
   if(!dir)
   {
      LOG(GENERAL, WARNING, "Could not open directory.", path, targetID, sysErr);
      Program::getApp()->getChunkFetcher()->setBad();
      return false;
   }

   int readRes;
   bool result = true;

   // reusable buffer holding "<path>/" + the current entry name
   std::string pathBuf = path;
   pathBuf.push_back('/');

   while(!getSelfTerminate())
   {
      ::dirent* item;

      // we really want struct struct dirent to contain a reasonably sized array for the filename
      BOOST_STATIC_ASSERT(sizeof(item->d_name) >= NAME_MAX + 1);

#if USE_READDIR_R
      ::dirent entry;
      readRes = ::readdir_r(dir, &entry, &item);
#else
      // plain readdir(): distinguish end-of-directory from error via errno
      errno = 0;
      item = readdir(dir);
      readRes = item ? 0 : errno;
#endif
      if(readRes != 0)
      {
         LOG(GENERAL, WARNING, "readdir failed.", path, targetID, sysErr(readRes));
         result = false;
         break;
      }

      if(!item)
         break; // end of directory reached

      if(::strcmp(item->d_name, ".") == 0 || ::strcmp(item->d_name, "..") == 0)
         continue;

      // truncate the buffer back to "<path>/" and append the current entry name
      pathBuf.resize(path.size() + 1);
      pathBuf += item->d_name;

      struct stat statBuf;

      int statRes = ::stat(pathBuf.c_str(), &statBuf);
      if(statRes)
      {
         LOG(GENERAL, WARNING, "Could not stat directory.", ("path", pathBuf), targetID, sysErr);
         result = false;
         break;
      }

      if(S_ISDIR(statBuf.st_mode) )
      {
         // subdirectory => descend recursively (basePathLen stays fixed at the root)
         result = walkChunkPath(pathBuf, buddyGroupID, basePathLen);
         if(!result)
            break;
      }
      else
      {
         // regular file => report it as a chunk; +1 skips the separator after the root
         const char* relativeChunkPath = pathBuf.c_str() + basePathLen + 1;

         // get only the dirname part of the path
         // (work on a copy, because dirname() may modify its argument)
         char* tmpPathCopy = strdup(relativeChunkPath);
         Path savedPath(dirname(tmpPathCopy) );

         free(tmpPathCopy);

         FsckChunk fsckChunk(item->d_name, targetID, savedPath, statBuf.st_size, statBuf.st_blocks,
            statBuf.st_ctime, statBuf.st_mtime, statBuf.st_atime, statBuf.st_uid, statBuf.st_gid,
            buddyGroupID);

         // may block if the fetcher's queue is full (throttling)
         Program::getApp()->getChunkFetcher()->addChunk(fsckChunk);
      }
   }

   ::closedir(dir);

   // a requested self-termination also counts as "walk did not complete"
   if (getSelfTerminate())
      result = false;

   if(!result)
      Program::getApp()->getChunkFetcher()->setBad();

   return result;
}
|
||||
62
storage/source/components/chunkfetcher/ChunkFetcherSlave.h
Normal file
62
storage/source/components/chunkfetcher/ChunkFetcherSlave.h
Normal file
@@ -0,0 +1,62 @@
|
||||
#pragma once
|
||||
|
||||
#include <common/app/log/LogContext.h>
|
||||
#include <common/components/ComponentInitException.h>
|
||||
#include <common/fsck/FsckChunk.h>
|
||||
#include <common/threading/PThread.h>
|
||||
|
||||
#include <mutex>
|
||||
|
||||
class ChunkFetcher; //forward decl.
|
||||
|
||||
/**
|
||||
* This component runs over all chunks of one target and gathers information suitable for fsck
|
||||
*
|
||||
* This component is not auto-started when the app starts. It is started and stopped by the
|
||||
* ChunkFetcher.
|
||||
*/
|
||||
class ChunkFetcherSlave : public PThread
|
||||
{
|
||||
friend class ChunkFetcher; // (to grant access to internal mutex)
|
||||
|
||||
public:
|
||||
ChunkFetcherSlave(uint16_t targetID);
|
||||
virtual ~ChunkFetcherSlave();
|
||||
|
||||
private:
|
||||
LogContext log;
|
||||
|
||||
Mutex statusMutex; // protects isRunning
|
||||
Condition isRunningChangeCond;
|
||||
|
||||
bool isRunning; // true if an instance of this component is currently running
|
||||
|
||||
uint16_t targetID;
|
||||
|
||||
virtual void run();
|
||||
|
||||
public:
|
||||
// getters & setters
|
||||
bool getIsRunning(bool isRunning)
|
||||
{
|
||||
const std::lock_guard<Mutex> lock(statusMutex);
|
||||
|
||||
return this->isRunning;
|
||||
}
|
||||
|
||||
private:
|
||||
void walkAllChunks();
|
||||
|
||||
bool walkChunkPath(const std::string& path, uint16_t buddyGroupID, unsigned basePathLen);
|
||||
|
||||
// getters & setters
|
||||
|
||||
void setIsRunning(bool isRunning)
|
||||
{
|
||||
const std::lock_guard<Mutex> lock(statusMutex);
|
||||
|
||||
this->isRunning = isRunning;
|
||||
isRunningChangeCond.broadcast();
|
||||
}
|
||||
};
|
||||
|
||||
Reference in New Issue
Block a user