New upstream version 8.1.0

geos_one
2025-08-10 01:34:16 +02:00
commit c891bb7105
4398 changed files with 838833 additions and 0 deletions

View File

@@ -0,0 +1,361 @@
#include <common/net/message/session/opening/CloseChunkFileMsg.h>
#include <common/net/message/session/opening/CloseChunkFileRespMsg.h>
#include <common/toolkit/MessagingTk.h>
#include <common/toolkit/SessionTk.h>
#include <components/worker/CloseChunkFileWork.h>
#include <net/msghelpers/MsgHelperUnlink.h>
#include <program/Program.h>
#include <session/SessionStore.h>
#include <storage/MetaStore.h>
#include "MsgHelperClose.h"
#include <boost/lexical_cast.hpp>
/**
* The wrapper for closeSessionFile() and closeChunkFile().
*
* @param maxUsedNodeIndex zero-based index, -1 means "none"
* @param msgUserID only used for msg header info.
* @param outUnlinkDisposalFile set to true if the file has neither remaining hardlinks nor open
* references, i.e. the caller should unlink the corresponding disposal file
*/
FhgfsOpsErr MsgHelperClose::closeFile(const NumNodeID sessionID, const std::string& fileHandleID,
EntryInfo* entryInfo, int maxUsedNodeIndex, unsigned msgUserID, bool* outUnlinkDisposalFile,
unsigned* outNumHardlinks, bool& outLastWriterClosed, DynamicFileAttribsVec* dynAttribs,
MirroredTimestamps* timestamps)
{
MetaStore* metaStore = Program::getApp()->getMetaStore();
unsigned accessFlags;
unsigned numInodeRefs;
MetaFileHandle inode;
*outUnlinkDisposalFile = false;
FhgfsOpsErr sessionRes = closeSessionFile(sessionID, fileHandleID, entryInfo,
&accessFlags, inode);
if(unlikely(sessionRes != FhgfsOpsErr_SUCCESS) )
return sessionRes;
FhgfsOpsErr chunksRes = closeChunkFile(
sessionID, fileHandleID, maxUsedNodeIndex, *inode, entryInfo, msgUserID, dynAttribs);
if (timestamps)
{
StatData sd;
inode->getStatData(sd);
*timestamps = sd.getMirroredTimestamps();
}
metaStore->closeFile(entryInfo, std::move(inode), accessFlags, outNumHardlinks, &numInodeRefs,
outLastWriterClosed);
if (!*outNumHardlinks && !numInodeRefs)
*outUnlinkDisposalFile = true;
return chunksRes;
}
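/* Rough usage sketch (hypothetical handler code, simplified): a close-message handler would call
 * MsgHelperClose::closeFile() and, if *outUnlinkDisposalFile came back true, follow up with
 * MsgHelperClose::unlinkDisposableFile(entryInfo->getEntryID(), msgUserID, isBuddyMirrored) to
 * remove the now-unreferenced disposal entry. */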
/**
* Close session in SessionStore.
*
* @param outCloseInode caller is responsible for calling MetaStore::closeFile() later if we
* returned success
*/
FhgfsOpsErr MsgHelperClose::closeSessionFile(const NumNodeID sessionID,
const std::string& fileHandleID, EntryInfo* entryInfo, unsigned* outAccessFlags,
MetaFileHandle& outCloseInode)
{
const char* logContext = "Close Helper (close session file)";
FhgfsOpsErr closeRes = FhgfsOpsErr_INTERNAL;
unsigned ownerFD = SessionTk::ownerFDFromHandleID(fileHandleID);
outCloseInode = {};
// find sessionFile
SessionStore* sessions = entryInfo->getIsBuddyMirrored()
? Program::getApp()->getMirroredSessions()
: Program::getApp()->getSessions();
Session* session = sessions->referenceSession(sessionID, true);
SessionFileStore* sessionFiles = session->getFiles();
SessionFile* sessionFile = sessionFiles->referenceSession(ownerFD);
if(!sessionFile)
{ // sessionFile does not exist
// note: nevertheless, we try to forward the close to the storage servers,
// because the meta-server just might have been restarted (for whatever reason).
// so we open the file here (if possible) and let the caller go on as if nothing was wrong...
MetaStore* metaStore = Program::getApp()->getMetaStore();
LogContext(logContext).log(Log_DEBUG, std::string("File not open ") +
"(session: " + sessionID.str() + "; "
"handle: " + StringTk::uintToStr(ownerFD) + "; "
"parentID: " + entryInfo->getParentEntryID() + "; "
"ID: " + entryInfo->getEntryID() + ")" );
*outAccessFlags = OPENFILE_ACCESS_READWRITE;
bool bypassAccessCheck = false; // Enforce regular file access restrictions
closeRes = metaStore->openFile(entryInfo, *outAccessFlags, bypassAccessCheck, outCloseInode);
}
else
{ // sessionFile exists
// save access flags and file for later
outCloseInode = sessionFile->releaseInode();
*outAccessFlags = sessionFile->getAccessFlags();
sessionFiles->releaseSession(sessionFile, entryInfo);
if(!sessionFiles->removeSession(ownerFD) )
{ // removal failed
LogContext(logContext).log(Log_WARNING, "Unable to remove file session "
"(still in use, marked for async cleanup now). "
"SessionID: " + sessionID.str() + "; "
"FileHandle: " + std::string(fileHandleID) );
}
else
{ // file session removed => caller can close file
closeRes = FhgfsOpsErr_SUCCESS;
}
}
sessions->releaseSession(session);
return closeRes;
}
/**
* Close chunk files on storage servers.
*
* Note: This method is also called by the hbMgr during client sync.
*
* @param msgUserID only for msg header info.
*/
FhgfsOpsErr MsgHelperClose::closeChunkFile(const NumNodeID sessionID,
const std::string& fileHandleID, int maxUsedNodeIndex, FileInode& inode, EntryInfo *entryInfo,
unsigned msgUserID, DynamicFileAttribsVec* dynAttribs)
{
if(maxUsedNodeIndex == -1)
return FhgfsOpsErr_SUCCESS; // file contents were not accessed => nothing to do
else
if( (maxUsedNodeIndex > 0) ||
(inode.getStripePattern()->getPatternType() == StripePatternType_BuddyMirror) )
return closeChunkFileParallel(
sessionID, fileHandleID, maxUsedNodeIndex, inode, entryInfo, msgUserID, dynAttribs);
else
return closeChunkFileSequential(
sessionID, fileHandleID, maxUsedNodeIndex, inode, entryInfo, msgUserID, dynAttribs);
}
/**
* Note: This method does not work for mirrored files; use closeChunkFileParallel() for those.
*
* @param maxUsedNodeIndex (zero-based position in nodeID vector)
* @param msgUserID only for msg header info.
*/
FhgfsOpsErr MsgHelperClose::closeChunkFileSequential(const NumNodeID sessionID,
const std::string& fileHandleID, int maxUsedNodeIndex, FileInode& inode, EntryInfo *entryInfo,
unsigned msgUserID, DynamicFileAttribsVec* dynAttribs)
{
const char* logContext = "Close Helper (close chunk files S)";
FhgfsOpsErr retVal = FhgfsOpsErr_SUCCESS;
TargetMapper* targetMapper = Program::getApp()->getTargetMapper();
TargetStateStore* targetStates = Program::getApp()->getTargetStateStore();
NodeStore* nodes = Program::getApp()->getStorageNodes();
StripePattern* pattern = inode.getStripePattern();
PathInfo pathInfo;
const UInt16Vector* targetIDs = pattern->getStripeTargetIDs();
DynamicFileAttribsVec dynAttribsVec(targetIDs->size() );
inode.getPathInfo(&pathInfo);
// send request to each node and receive the response message
int currentTargetIndex = 0;
for(UInt16VectorConstIter iter = targetIDs->begin();
(currentTargetIndex <= maxUsedNodeIndex) && (iter != targetIDs->end() );
iter++, currentTargetIndex++)
{
uint16_t targetID = *iter;
CloseChunkFileMsg closeMsg(sessionID, fileHandleID, targetID, &pathInfo);
closeMsg.setMsgHeaderUserID(msgUserID);
RequestResponseArgs rrArgs(NULL, &closeMsg, NETMSGTYPE_CloseChunkFileResp);
RequestResponseTarget rrTarget(targetID, targetMapper, nodes);
rrTarget.setTargetStates(targetStates);
// send request to node and receive response
FhgfsOpsErr requestRes = MessagingTk::requestResponseTarget(&rrTarget, &rrArgs);
if(requestRes != FhgfsOpsErr_SUCCESS)
{ // communication error
LogContext(logContext).log(Log_WARNING,
"Communication with storage target failed: " + StringTk::uintToStr(targetID) + "; "
"FileHandle: " + fileHandleID + "; "
"Error: " + boost::lexical_cast<std::string>(requestRes));
if(retVal == FhgfsOpsErr_SUCCESS)
retVal = requestRes;
continue;
}
// correct response type received
CloseChunkFileRespMsg* closeRespMsg = (CloseChunkFileRespMsg*)rrArgs.outRespMsg.get();
FhgfsOpsErr closeRemoteRes = closeRespMsg->getResult();
// set current dynamic attribs (even if result not success, because then storageVersion==0)
DynamicFileAttribs currentDynAttribs(closeRespMsg->getStorageVersion(),
closeRespMsg->getFileSize(), closeRespMsg->getAllocedBlocks(),
closeRespMsg->getModificationTimeSecs(), closeRespMsg->getLastAccessTimeSecs() );
dynAttribsVec[currentTargetIndex] = currentDynAttribs;
if(unlikely(closeRemoteRes != FhgfsOpsErr_SUCCESS) )
{ // error: chunk file close problem
int logLevel = Log_WARNING;
if(closeRemoteRes == FhgfsOpsErr_INUSE)
logLevel = Log_DEBUG; // happens on ctrl+c, so don't irritate user with these log msgs
LogContext(logContext).log(logLevel,
"Storage target was unable to close chunk file: " + StringTk::uintToStr(targetID) + "; "
"Error: " + boost::lexical_cast<std::string>(closeRemoteRes) + "; "
"Session: " + sessionID.str() + "; "
"FileHandle: " + fileHandleID);
if(closeRemoteRes == FhgfsOpsErr_INUSE)
continue; // don't escalate this error to client (happens on ctrl+c)
retVal = closeRemoteRes;
continue;
}
// success: chunk file closed
LOG_DEBUG(logContext, Log_DEBUG,
"Storage target closed chunk file: " + StringTk::uintToStr(targetID) + "; "
"FileHandle: " + fileHandleID);
}
inode.setDynAttribs(dynAttribsVec); // the actual update
if (dynAttribs)
dynAttribs->swap(dynAttribsVec);
if(unlikely(retVal != FhgfsOpsErr_SUCCESS) )
LogContext(logContext).log(Log_WARNING,
"Problems occurred during close of chunk files. "
"FileHandle: " + fileHandleID);
return retVal;
}
/**
* @param maxUsedNodeIndex (zero-based position in nodeID vector)
* @param msgUserID only for msg header info.
*/
FhgfsOpsErr MsgHelperClose::closeChunkFileParallel(const NumNodeID sessionID,
const std::string& fileHandleID, int maxUsedNodeIndex, FileInode& inode, EntryInfo* entryInfo,
unsigned msgUserID, DynamicFileAttribsVec* dynAttribs)
{
const char* logContext = "Close Helper (close chunk files)";
App* app = Program::getApp();
MultiWorkQueue* slaveQ = app->getCommSlaveQueue();
StripePattern* pattern = inode.getStripePattern();
const UInt16Vector* targetIDs = pattern->getStripeTargetIDs();
PathInfo pathInfo;
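// maxUsedNodeIndex is the zero-based index of the highest stripe target whose contents were
// actually accessed, so only targets [0..maxUsedNodeIndex] need a close message (capped at the
// stripe width)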
size_t numTargetWorksHint = (maxUsedNodeIndex < 0) ? 0 : (maxUsedNodeIndex+1);
size_t numTargetWorks = BEEGFS_MIN(numTargetWorksHint, targetIDs->size() );
DynamicFileAttribsVec dynAttribsVec(targetIDs->size() );
FhgfsOpsErr retVal = FhgfsOpsErr_SUCCESS;
FhgfsOpsErrVec nodeResults(numTargetWorks);
SynchronizedCounter counter;
inode.getPathInfo(&pathInfo);
// generate work for storage targets...
for(size_t i=0; i < numTargetWorks; i++)
{
CloseChunkFileWork* work = new CloseChunkFileWork(sessionID, fileHandleID, pattern,
(*targetIDs)[i], &pathInfo, &(dynAttribsVec[i]), &(nodeResults[i]), &counter);
work->setMsgUserID(msgUserID);
slaveQ->addDirectWork(work);
}
// wait for work completion...
counter.waitForCount(numTargetWorks);
// check target results...
for(size_t i=0; i < numTargetWorks; i++)
{
if(unlikely(nodeResults[i] != FhgfsOpsErr_SUCCESS) )
{
if(nodeResults[i] == FhgfsOpsErr_INUSE)
continue; // don't escalate this error to client (happens on ctrl+c)
LogContext(logContext).log(Log_WARNING,
"Problems occurred during release of storage server file handles. "
"FileHandle: " + std::string(fileHandleID) );
retVal = nodeResults[i];
goto apply_dyn_attribs;
}
}
apply_dyn_attribs:
inode.setDynAttribs(dynAttribsVec); // the actual update
if (dynAttribs)
dynAttribs->swap(dynAttribsVec);
return retVal;
}
/**
* Unlink file in META_DISPOSALDIR_ID_STR/
*
* @param msgUserID only for msg header info.
*/
FhgfsOpsErr MsgHelperClose::unlinkDisposableFile(const std::string& fileID, unsigned msgUserID,
bool isBuddyMirrored)
{
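// when a disposal GC period is configured (getTuneDisposalGCPeriod() > 0), mirrored disposal
// entries are presumably left to the periodic garbage collection instead of being unlinked here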
if (isBuddyMirrored && 0 < Program::getApp()->getConfig()->getTuneDisposalGCPeriod())
return FhgfsOpsErr_SUCCESS;
// Note: This attempt to unlink directly is inefficient if the file is marked as disposable
// and is still busy (but we assume that this rarely happens)
DirInode* dir = Program::getApp()->getMetaStore()->referenceDir(
isBuddyMirrored ? META_MIRRORDISPOSALDIR_ID_STR : META_DISPOSALDIR_ID_STR,
isBuddyMirrored, true);
if (!dir)
return FhgfsOpsErr_INTERNAL;
FhgfsOpsErr disposeRes = MsgHelperUnlink::unlinkFile(*dir, fileID, msgUserID);
if (disposeRes == FhgfsOpsErr_PATHNOTEXISTS)
disposeRes = FhgfsOpsErr_SUCCESS; // file not marked for disposal => not an error
Program::getApp()->getMetaStore()->releaseDir(dir->getID());
return disposeRes;
}

View File

@@ -0,0 +1,36 @@
#pragma once
#include <common/Common.h>
#include <storage/MetaStore.h>
class MsgHelperClose
{
public:
static FhgfsOpsErr closeFile(const NumNodeID sessionID, const std::string& fileHandleID,
EntryInfo* entryInfo, int maxUsedNodeIndex, unsigned msgUserID,
bool* outUnlinkDisposalFile, unsigned* outNumHardlinks, bool& outLastWriterClosed,
DynamicFileAttribsVec* dynAttribs = NULL, MirroredTimestamps* timestamps = NULL);
static FhgfsOpsErr closeSessionFile(const NumNodeID sessionID, const std::string& fileHandleID,
EntryInfo* entryInfo, unsigned* outAccessFlags, MetaFileHandle& outCloseInode);
static FhgfsOpsErr closeChunkFile(const NumNodeID sessionID,
const std::string& fileHandleID, int maxUsedNodeIndex, FileInode& inode,
EntryInfo *entryInfo, unsigned msgUserID, DynamicFileAttribsVec* dynAttribs = NULL);
static FhgfsOpsErr unlinkDisposableFile(const std::string& fileID, unsigned msgUserID,
bool isBuddyMirrored);
private:
MsgHelperClose()
{
}
static FhgfsOpsErr closeChunkFileSequential(const NumNodeID sessionID,
const std::string& fileHandleID, int maxUsedNodeIndex, FileInode& inode,
EntryInfo *entryInfo, unsigned msgUserID, DynamicFileAttribsVec* dynAttribs);
static FhgfsOpsErr closeChunkFileParallel(const NumNodeID sessionID,
const std::string& fileHandleID, int maxUsedNodeIndex, FileInode& inode,
EntryInfo* entryInfo, unsigned msgUserID, DynamicFileAttribsVec* dynAttribs);
public:
// inliners
};

View File

@@ -0,0 +1,168 @@
#include <program/Program.h>
#include "MsgHelperLocking.h"
#include <boost/lexical_cast.hpp>
/**
* Try to recover a file session that got lost (e.g. due to a mds restart) and was implicitly
* reported to exist by a client, e.g. during a flock request. The session will be inserted into
* the store.
*
* Note: We re-open the file in r+w mode here, because we don't know the orig mode.
*
* @param outSessionFile will be set to referenced (recovered) session if success is returned,
* NULL otherwise
*/
FhgfsOpsErr MsgHelperLocking::trySesssionRecovery(EntryInfo* entryInfo, NumNodeID clientID,
unsigned ownerFD, SessionFileStore* sessionFiles, SessionFile** outSessionFile)
{
const char* logContext = "MsgHelperLocking (try session recovery)";
App* app = Program::getApp();
MetaStore* metaStore = app->getMetaStore();
LogContext(logContext).log(Log_WARNING, std::string("Attempting recovery of file session ") +
"(session: " + clientID.str() + "; "
"handle: " + StringTk::uintToStr(ownerFD) + "; "
"parentID: " + entryInfo->getParentEntryID() + "; "
"entryID: " + entryInfo->getEntryID() + ")" );
*outSessionFile = NULL;
MetaFileHandle recoveryFile;
unsigned recoveryAccessFlags = OPENFILE_ACCESS_READWRITE; /* (r+w is our only option, since we
don't know the original flags) */
bool bypassAccessCheck = false; // Enforce regular file access restrictions
FhgfsOpsErr openRes = metaStore->openFile(entryInfo, recoveryAccessFlags,
bypassAccessCheck, recoveryFile);
if(openRes != FhgfsOpsErr_SUCCESS)
{ // file could not be opened => there's nothing we can do in this case
LogContext(logContext).log(Log_WARNING, std::string("Recovery of file session failed: ") +
boost::lexical_cast<std::string>(openRes));
return openRes;
}
else
{ // file opened => try to insert the recovery file session
SessionFile* recoverySessionFile = new SessionFile(std::move(recoveryFile),
recoveryAccessFlags, entryInfo);
recoverySessionFile->setSessionID(ownerFD);
*outSessionFile = sessionFiles->addAndReferenceRecoverySession(recoverySessionFile);
if(!*outSessionFile)
{ // bad, our recovery session ID was used by someone in the meantime => cleanup
unsigned numHardlinks; // ignored here
unsigned numInodeRefs; // ignored here
bool lastWriterClosed; // ignored here
LogContext(logContext).log(Log_WARNING,
"Recovery of file session failed: SessionID is in use by another file now.");
metaStore->closeFile(entryInfo, recoverySessionFile->releaseInode(), recoveryAccessFlags,
&numHardlinks, &numInodeRefs, lastWriterClosed);
delete(recoverySessionFile);
return FhgfsOpsErr_INTERNAL;
}
else
{ // recovery succeeded
LogContext(logContext).log(Log_NOTICE, "File session recovered.");
}
}
return FhgfsOpsErr_SUCCESS;
}
/**
* Note: This method also tries session recovery for lock requests.
*/
FhgfsOpsErr MsgHelperLocking::flockAppend(EntryInfo* entryInfo, unsigned ownerFD,
EntryLockDetails& lockDetails)
{
App* app = Program::getApp();
SessionStore* sessions = entryInfo->getIsBuddyMirrored()
? app->getMirroredSessions()
: app->getSessions();
MetaStore* metaStore = app->getMetaStore();
FhgfsOpsErr retVal = FhgfsOpsErr_INTERNAL;
// find sessionFile
Session* session = sessions->referenceSession(lockDetails.getClientNumID(), true);
SessionFileStore* sessionFiles = session->getFiles();
SessionFile* sessionFile = sessionFiles->referenceSession(ownerFD);
if(!sessionFile)
{ // sessionFile does not exist (mds restarted?)
// check if this is just an UNLOCK REQUEST
if(lockDetails.isUnlock() )
{ // it's an unlock => we'll just ignore it (since the locks are gone anyways)
retVal = FhgfsOpsErr_SUCCESS;
goto cleanup_session;
}
// check if this is a LOCK CANCEL REQUEST
if(lockDetails.isCancel() )
{ // it's a lock cancel
/* this is an important special case, because the client might have succeeded in closing the
file but the connection might have been interrupted during unlock, so we definitely have to try
canceling the lock here */
// if the file still exists, just do the lock cancel without session recovery attempt
auto [lockCancelFile, referenceRes] = metaStore->referenceFile(entryInfo);
if(lockCancelFile)
{
lockCancelFile->flockAppend(lockDetails);
metaStore->releaseFile(entryInfo->getParentEntryID(), lockCancelFile);
}
retVal = FhgfsOpsErr_SUCCESS;
goto cleanup_session;
}
// it's a LOCK REQUEST => try to recover session file to do the locking
retVal = MsgHelperLocking::trySesssionRecovery(entryInfo, lockDetails.getClientNumID(),
ownerFD, sessionFiles, &sessionFile);
// (note: sessionFile==NULL now if recovery was not successful)
} // end of session file session recovery attempt
if(sessionFile)
{ // sessionFile exists (or was successfully recovered)
auto& file = sessionFile->getInode();
auto lockGranted = file->flockAppend(lockDetails);
if (!lockGranted.first)
retVal = FhgfsOpsErr_WOULDBLOCK;
else
retVal = FhgfsOpsErr_SUCCESS;
if (!lockGranted.second.empty())
LockingNotifier::notifyWaitersEntryLock(LockEntryNotifyType_APPEND,
file->getReferenceParentID(), file->getEntryID(), file->getIsBuddyMirrored(),
std::move(lockGranted.second));
// cleanup
sessionFiles->releaseSession(sessionFile, entryInfo);
}
// cleanup
cleanup_session:
sessions->releaseSession(session);
return retVal;
}

View File

@@ -0,0 +1,27 @@
#pragma once
#include <common/storage/EntryInfo.h>
#include <common/storage/StorageErrors.h>
#include <common/Common.h>
#include <session/SessionFileStore.h>
/**
* Common helpers for locking related messages.
*/
class MsgHelperLocking
{
public:
static FhgfsOpsErr trySesssionRecovery(EntryInfo* entryInfo, NumNodeID clientID,
unsigned ownerFD, SessionFileStore* sessionFiles, SessionFile** outSessionFile);
static FhgfsOpsErr flockAppend(EntryInfo* entryInfo, unsigned ownerFD,
EntryLockDetails& lockDetails);
private:
MsgHelperLocking() {}
};

View File

@@ -0,0 +1,53 @@
#include <common/toolkit/MessagingTk.h>
#include <components/ModificationEventFlusher.h>
#include <program/Program.h>
#include "MsgHelperMkFile.h"
/**
* @param stripePattern can be NULL, in which case a new pattern gets created; should only be set
* if this is the secondary buddy of a mirror group
*/
FhgfsOpsErr MsgHelperMkFile::mkFile(DirInode& parentDir, MkFileDetails* mkDetails,
const UInt16List* preferredTargets, const unsigned numtargets, const unsigned chunksize,
StripePattern* stripePattern, RemoteStorageTarget* rstInfo, EntryInfo* outEntryInfo,
FileInodeStoreData* outInodeData, StoragePoolId storagePoolId)
{
const char* logContext = "MsgHelperMkFile (create file)";
App* app = Program::getApp();
MetaStore* metaStore = app->getMetaStore();
ModificationEventFlusher* modEventFlusher = app->getModificationEventFlusher();
bool modEventLoggingEnabled = modEventFlusher->isLoggingEnabled();
FhgfsOpsErr retVal;
// create new stripe pattern
if ( !stripePattern )
stripePattern = parentDir.createFileStripePattern(preferredTargets, numtargets, chunksize,
storagePoolId);
// check availability of stripe targets
if(unlikely(!stripePattern || stripePattern->getStripeTargetIDs()->empty() ) )
{
LogContext(logContext).logErr(
"Unable to create stripe pattern. No storage targets available? "
"File: " + mkDetails->newName);
SAFE_DELETE(stripePattern);
return FhgfsOpsErr_INTERNAL;
}
// create meta file
retVal = metaStore->mkNewMetaFile(parentDir, mkDetails,
std::unique_ptr<StripePattern>(stripePattern), rstInfo, outEntryInfo, outInodeData);
if ( (modEventLoggingEnabled ) && ( outEntryInfo ) )
{
std::string entryID = outEntryInfo->getEntryID();
modEventFlusher->add(ModificationEvent_FILECREATED, entryID);
}
return retVal;
}

View File

@@ -0,0 +1,28 @@
#pragma once
#include <common/storage/StorageErrors.h>
#include <common/Common.h>
#include <storage/MetaStore.h>
#include <storage/MkFileDetails.h>
struct MkFileDetails; // forward declaration
/**
* Default class to create meta-data files (including inodes and directories).
*/
class MsgHelperMkFile
{
public:
static FhgfsOpsErr mkFile(DirInode& parentDir, MkFileDetails* mkDetails,
const UInt16List* preferredTargets, const unsigned numtargets, const unsigned chunksize,
StripePattern* stripePattern, RemoteStorageTarget* rstInfo, EntryInfo* outEntryInfo,
FileInodeStoreData* outInodeData, StoragePoolId storagePoolId = StoragePoolStore::INVALID_POOL_ID);
private:
MsgHelperMkFile() {}
};

View File

@@ -0,0 +1,97 @@
#include <common/toolkit/MathTk.h>
#include <common/toolkit/MessagingTk.h>
#include <common/toolkit/SessionTk.h>
#include <common/storage/striping/Raid0Pattern.h>
#include <net/msghelpers/MsgHelperTrunc.h>
#include <program/Program.h>
#include <storage/MetaStore.h>
#include "MsgHelperOpen.h"
/**
* Note: This only does the basic open; you probably still want to create the session for this
* opened file afterwards.
* Note: Also performs truncation based on accessFlags if necessary.
*
* @param msgUserID only used for msg header info.
* @param outOpenFile only set if return indicates success.
*/
FhgfsOpsErr MsgHelperOpen::openFile(EntryInfo* entryInfo, unsigned accessFlags,
bool useQuota, bool bypassAccessCheck, unsigned msgUserID, MetaFileHandle& outFileInode,
bool isSecondary)
{
const char* logContext = "Open File Helper";
IGNORE_UNUSED_VARIABLE(logContext);
bool truncLocalRequired = false;
if(accessFlags & OPENFILE_ACCESS_TRUNC)
truncLocalRequired = MsgHelperTrunc::isTruncChunkRequired(entryInfo, 0);
FhgfsOpsErr openRes = openMetaFile(entryInfo, accessFlags, bypassAccessCheck, outFileInode);
if(openRes != FhgfsOpsErr_SUCCESS)
return openRes;
/* check chunkSize for compatibility.
this check was introduced in 2011.05-r6, when we switched from arbitrary chunk sizes to the
new chunk size constraints (min size and power of two). the check can be removed in a future
release, when we are sure that there are no old installations with arbitrary chunk sizes
left. */
unsigned chunkSize = outFileInode->getStripePattern()->getChunkSize();
if(unlikely( (chunkSize < STRIPEPATTERN_MIN_CHUNKSIZE) ||
!MathTk::isPowerOfTwo(chunkSize) ) )
{ // validity check failed => don't open this file (we would risk corrupting it otherwise)
LogContext(logContext).logErr("This version of BeeGFS is not compatible with this "
"chunk size: " + StringTk::uintToStr(chunkSize) + ". "
"Refusing to open file. "
"parentInfo: " + entryInfo->getParentEntryID() + " "
"entryInfo: " + entryInfo->getEntryID() );
openMetaFileCompensate(entryInfo, std::move(outFileInode), accessFlags);
return FhgfsOpsErr_INTERNAL;
}
if(truncLocalRequired && !isSecondary)
{ // trunc was specified and is needed => do it
LOG_DEBUG(logContext, Log_DEBUG, std::string("Opening with trunc local") );
DynamicFileAttribsVec dynAttribs;
FhgfsOpsErr truncRes = MsgHelperTrunc::truncChunkFile(
*outFileInode, entryInfo, 0, useQuota, msgUserID, dynAttribs);
if(unlikely(truncRes != FhgfsOpsErr_SUCCESS) )
{ // error => undo open()
openMetaFileCompensate(entryInfo, std::move(outFileInode), accessFlags);
openRes = truncRes;
}
}
return openRes;
}
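/* Rough usage sketch (hypothetical, simplified): an open-message handler would call openFile(),
 * then on success wrap outFileInode in a SessionFile, add it to the client's SessionFileStore and
 * build the fileHandleID from the resulting session file ID (see SessionTk); the matching
 * teardown happens in MsgHelperClose::closeSessionFile(). */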
FhgfsOpsErr MsgHelperOpen::openMetaFile(EntryInfo* entryInfo, unsigned accessFlags,
bool bypassAccessCheck, MetaFileHandle& outOpenInode)
{
MetaStore* metaStore = Program::getApp()->getMetaStore();
FhgfsOpsErr openRes = metaStore->openFile(entryInfo, accessFlags, bypassAccessCheck, outOpenInode);
return openRes;
}
/**
* Undo an open.
* (Typically called when an error occurred after a successful open).
*/
void MsgHelperOpen::openMetaFileCompensate(EntryInfo* entryInfo,
MetaFileHandle inode, unsigned accessFlags)
{
MetaStore* metaStore = Program::getApp()->getMetaStore();
unsigned numHardlinks; // ignored here
unsigned numInodeRefs; // ignored here
bool lastWriterClosed; // ignored here
metaStore->closeFile(entryInfo, std::move(inode), accessFlags, &numHardlinks, &numInodeRefs,
lastWriterClosed);
}

View File

@@ -0,0 +1,26 @@
#pragma once
#include <common/Common.h>
#include <common/storage/EntryInfo.h>
#include <storage/MetaStore.h>
#include <storage/MetadataEx.h>
class MsgHelperOpen
{
public:
static FhgfsOpsErr openFile(EntryInfo* entryInfo, unsigned accessFlags,
bool useQuota, bool bypassAccessCheck, unsigned msgUserID, MetaFileHandle& outFileInode,
bool isSecondary);
private:
MsgHelperOpen() {}
static FhgfsOpsErr openMetaFile(EntryInfo* entryInfo,
unsigned accessFlags, bool bypassAccessCheck, MetaFileHandle& outOpenInode);
static void openMetaFileCompensate(EntryInfo* entryInfo,
MetaFileHandle inode, unsigned accessFlags);
};

View File

@@ -0,0 +1,258 @@
#include <common/net/message/storage/attribs/GetChunkFileAttribsMsg.h>
#include <common/net/message/storage/attribs/GetChunkFileAttribsRespMsg.h>
#include <common/toolkit/MessagingTk.h>
#include <common/toolkit/SynchronizedCounter.h>
#include <components/worker/GetChunkFileAttribsWork.h>
#include <program/Program.h>
#include "MsgHelperStat.h"
/**
* Note: This will automatically refresh dynamic attribs if they are outdated.
*
* @param loadFromDisk whether to load the data from disk; if false, only data from an already
* opened inode is returned
* @param msgUserID will only be used in msg header info.
* @param outParentNodeID may be NULL (default) if the caller is not interested
* @param outParentEntryID may be NULL (if outParentNodeID is NULL)
*/
FhgfsOpsErr MsgHelperStat::stat(EntryInfo* entryInfo, bool loadFromDisk, unsigned msgUserID,
StatData& outStatData, NumNodeID* outParentNodeID, std::string* outParentEntryID)
{
const char* logContext = "Stat Helper (stat entry)";
MetaStore* metaStore = Program::getApp()->getMetaStore();
FhgfsOpsErr retVal;
retVal = metaStore->stat(entryInfo, loadFromDisk, outStatData, outParentNodeID,
outParentEntryID);
if(retVal == FhgfsOpsErr_DYNAMICATTRIBSOUTDATED)
{ // dynamic attribs outdated => get fresh dynamic attribs from storage servers and stat again
MsgHelperStat::refreshDynAttribs(entryInfo, false, msgUserID);
// note: if we get here, it is a regular file and we don't need to request the parent data
retVal = metaStore->stat(entryInfo, loadFromDisk, outStatData);
// this time we ignore outdated dynamic attribs because refreshing them again would
// be useless (or we could keep on doing it forever)
if(retVal == FhgfsOpsErr_DYNAMICATTRIBSOUTDATED)
retVal = FhgfsOpsErr_SUCCESS;
}
else
if(retVal == FhgfsOpsErr_PATHNOTEXISTS && loadFromDisk)
{ /* metadata not found: it is hard to tell whether this is an error (e.g. metadata was never
created) or just a normal case (e.g. someone removed a file during an "ls -l") */
LogContext(logContext).log(Log_DEBUG, "Missing metadata for entryID: " +
entryInfo->getEntryID() + ". "
"(Possibly a valid race of two processes or a cached entry that is now being "
"checked by a client revalidate() method.)");
}
return retVal;
}
/**
* Refresh current file size and other dynamic attribs from storage servers.
*
* @param makePersistent whether or not this method should also update persistent metadata.
* @param msgUserID only used for msg header info.
*/
FhgfsOpsErr MsgHelperStat::refreshDynAttribs(EntryInfo* entryInfo, bool makePersistent,
unsigned msgUserID)
{
MetaStore* metaStore = Program::getApp()->getMetaStore();
FhgfsOpsErr retVal;
std::string parentEntryID = entryInfo->getParentEntryID();
std::string entryID = entryInfo->getEntryID();
auto [inode, referenceRes] = metaStore->referenceFile(entryInfo);
if(!inode)
{
std::string logContext("Stat Helper (refresh filesize: parentID: " + parentEntryID
+ " entryID: " + entryID + ")");
LogContext(logContext).log(Log_DEBUG, std::string("File could not be referenced") );
return referenceRes;
}
if(inode->getStripePattern()->getAssignedNumTargets() == 1)
retVal = refreshDynAttribsSequential(*inode, entryID, msgUserID);
else
retVal = refreshDynAttribsParallel(*inode, entryID, msgUserID);
if( (retVal == FhgfsOpsErr_SUCCESS) && makePersistent)
{
bool persistenceRes = inode->updateInodeOnDisk(entryInfo);
if(!persistenceRes)
retVal = FhgfsOpsErr_INTERNAL;
}
metaStore->releaseFile(entryInfo->getParentEntryID(), inode);
return retVal;
}
/**
* Note: This method does support getting attrs from buddymirrors, but only from group's primary.
*
* @param msgUserID only used for msg header info.
*/
FhgfsOpsErr MsgHelperStat::refreshDynAttribsSequential(FileInode& inode, const std::string& entryID,
unsigned msgUserID)
{
const char* logContext = "Stat Helper (refresh chunk files S)";
FhgfsOpsErr retVal = FhgfsOpsErr_SUCCESS; // will be set to node error, if any
App* app = Program::getApp();
StripePattern* pattern = inode.getStripePattern();
const UInt16Vector* targetIDs = pattern->getStripeTargetIDs();
DynamicFileAttribsVec dynAttribsVec(targetIDs->size() );
PathInfo pathInfo;
inode.getPathInfo(&pathInfo);
// send request to each node and receive the response message
unsigned currentStripeNodeIndex = 0;
for(UInt16VectorConstIter iter = targetIDs->begin();
iter != targetIDs->end();
iter++, currentStripeNodeIndex++)
{
uint16_t targetID = *iter;
// prepare request message
GetChunkFileAttribsMsg getSizeMsg(entryID, *iter, &pathInfo);
if(pattern->getPatternType() == StripePatternType_BuddyMirror)
getSizeMsg.addMsgHeaderFeatureFlag(GETCHUNKFILEATTRSMSG_FLAG_BUDDYMIRROR);
getSizeMsg.setMsgHeaderUserID(msgUserID);
// prepare communication
RequestResponseTarget rrTarget(targetID, app->getTargetMapper(), app->getStorageNodes() );
rrTarget.setTargetStates(app->getTargetStateStore() );
if(pattern->getPatternType() == StripePatternType_BuddyMirror)
rrTarget.setMirrorInfo(app->getStorageBuddyGroupMapper(), false);
RequestResponseArgs rrArgs(NULL, &getSizeMsg, NETMSGTYPE_GetChunkFileAttribsResp);
// communicate
FhgfsOpsErr requestRes = MessagingTk::requestResponseTarget(&rrTarget, &rrArgs);
if(unlikely(requestRes != FhgfsOpsErr_SUCCESS) )
{ // communication error
LogContext(logContext).log(Log_WARNING,
std::string("Communication with storage target failed. ") +
(pattern->getPatternType() == StripePatternType_BuddyMirror ? "Mirror " : "") +
"TargetID: " + StringTk::uintToStr(targetID) + "; "
"EntryID: " + entryID);
retVal = requestRes;
continue;
}
// correct response type received
auto* getSizeRespMsg = (GetChunkFileAttribsRespMsg*)rrArgs.outRespMsg.get();
FhgfsOpsErr getSizeResult = getSizeRespMsg->getResult();
if(getSizeResult != FhgfsOpsErr_SUCCESS)
{ // error: got no fresh attributes
LogContext(logContext).log(Log_WARNING,
std::string("Getting fresh chunk file attributes from target failed. ") +
(pattern->getPatternType() == StripePatternType_BuddyMirror ? "Mirror " : "") +
"TargetID: " + StringTk::uintToStr(targetID) + "; "
"EntryID: " + entryID);
retVal = getSizeResult;
continue;
}
// success: got fresh chunk file attributes
//log.log(3, std::string("Got fresh filesize from node: ") + nodeID);
DynamicFileAttribs currentDynAttribs(getSizeRespMsg->getStorageVersion(),
getSizeRespMsg->getSize(), getSizeRespMsg->getAllocedBlocks(),
getSizeRespMsg->getModificationTimeSecs(), getSizeRespMsg->getLastAccessTimeSecs() );
dynAttribsVec[currentStripeNodeIndex] = currentDynAttribs;
}
inode.setDynAttribs(dynAttribsVec); // the actual update
if(retVal != FhgfsOpsErr_SUCCESS)
{
LogContext(logContext).log(Log_WARNING,
"Problems occurred during chunk file attributes refresh. "
"EntryID: " + entryID);
}
return retVal;
}
/**
* Note: For buddymirrored files, only group's primary is used.
*/
FhgfsOpsErr MsgHelperStat::refreshDynAttribsParallel(FileInode& inode, const std::string& entryID,
unsigned msgUserID)
{
const char* logContext = "Stat Helper (refresh chunk files)";
FhgfsOpsErr retVal = FhgfsOpsErr_SUCCESS; // will be set to node error, if any
App* app = Program::getApp();
MultiWorkQueue* slaveQ = app->getCommSlaveQueue();
StripePattern* pattern = inode.getStripePattern();
const UInt16Vector* targetIDs = pattern->getStripeTargetIDs();
DynamicFileAttribsVec dynAttribsVec(targetIDs->size() );
size_t numWorks = targetIDs->size();
FhgfsOpsErrVec nodeResults(numWorks);
SynchronizedCounter counter;
PathInfo pathInfo;
inode.getPathInfo(&pathInfo);
for(size_t i=0; i < numWorks; i++)
{
GetChunkFileAttribsWork* work = new GetChunkFileAttribsWork(entryID, pattern, (*targetIDs)[i],
&pathInfo, &(dynAttribsVec[i]), &(nodeResults[i]), &counter);
work->setMsgUserID(msgUserID);
slaveQ->addDirectWork(work);
}
counter.waitForCount(numWorks);
for(size_t i=0; i < numWorks; i++)
{
if(nodeResults[i] != FhgfsOpsErr_SUCCESS)
{
LogContext(logContext).log(Log_WARNING,
"Problems occurred during file attribs refresh. entryID: " + entryID);
retVal = nodeResults[i];
break;
}
}
inode.setDynAttribs(dynAttribsVec); // the actual update
return retVal;
}

View File

@@ -0,0 +1,30 @@
#pragma once
#include <common/Common.h>
#include <storage/MetaStore.h>
#include <storage/MetadataEx.h>
class MsgHelperStat
{
public:
static FhgfsOpsErr stat(EntryInfo* entryInfo, bool loadFromDisk, unsigned msgUserID,
StatData& outStatData, NumNodeID* outParentNodeID = NULL,
std::string* outParentEntryID = NULL);
static FhgfsOpsErr refreshDynAttribs(EntryInfo* entryInfo, bool makePersistent,
unsigned msgUserID);
private:
MsgHelperStat() {}
static FhgfsOpsErr refreshDynAttribsSequential(FileInode& inode, const std::string& entryID,
unsigned msgUserID);
static FhgfsOpsErr refreshDynAttribsParallel(FileInode& inode, const std::string& entryID,
unsigned msgUserID);
public:
// inliners
};

View File

@@ -0,0 +1,279 @@
#include <common/net/message/storage/TruncLocalFileMsg.h>
#include <common/net/message/storage/TruncLocalFileRespMsg.h>
#include <components/worker/TruncChunkFileWork.h>
#include <program/Program.h>
#include "MsgHelperTrunc.h"
#include <boost/lexical_cast.hpp>
/**
* Note: Will also update persistent metadata on disk.
*
* @param msgUserID will only be used in msg header info.
*/
FhgfsOpsErr MsgHelperTrunc::truncFile(EntryInfo* entryInfo, int64_t filesize, bool useQuota,
unsigned msgUserID, DynamicFileAttribsVec& dynAttribs)
{
MetaStore* metaStore = Program::getApp()->getMetaStore();
auto [inode, referenceRes] = metaStore->referenceFile(entryInfo);
if(!inode)
return referenceRes;
FhgfsOpsErr localRes = truncChunkFile(*inode, entryInfo, filesize, useQuota, msgUserID,
dynAttribs);
inode->updateInodeOnDisk(entryInfo);
metaStore->releaseFile(entryInfo->getParentEntryID(), inode);
return localRes;
}
/**
* Note: This also updates dynamic file attribs.
* Note: Call this directly (instead of truncFile() ) if you already got the file handle.
* Note: Will NOT update persistent metadata on disk.
*
* @param msgUserID only used for msg header info.
*/
FhgfsOpsErr MsgHelperTrunc::truncChunkFile(FileInode& inode, EntryInfo* entryInfo,
int64_t filesize, bool useQuota, unsigned userIDHint, DynamicFileAttribsVec& dynAttribs)
{
StripePattern* pattern = inode.getStripePattern();
if( (pattern->getStripeTargetIDs()->size() > 1) ||
(pattern->getPatternType() == StripePatternType_BuddyMirror) )
return truncChunkFileParallel(inode, entryInfo, filesize, useQuota, userIDHint, dynAttribs);
else
return truncChunkFileSequential(inode, entryInfo, filesize, useQuota, userIDHint, dynAttribs);
}
/**
* Note: This method does not work for mirrored files; use truncChunkFileParallel() for those.
* Note: This also updates dynamic file attribs.
*
* @param msgUserID only used for msg header info.
*/
FhgfsOpsErr MsgHelperTrunc::truncChunkFileSequential(FileInode& inode, EntryInfo* entryInfo,
int64_t filesize, bool useQuota, unsigned msgUserID, DynamicFileAttribsVec& dynAttribs)
{
const char* logContext = "Trunc chunk file helper S";
StripePattern* pattern = inode.getStripePattern();
const UInt16Vector* targetIDs = pattern->getStripeTargetIDs();
TargetMapper* targetMapper = Program::getApp()->getTargetMapper();
TargetStateStore* targetStates = Program::getApp()->getTargetStateStore();
NodeStore* nodes = Program::getApp()->getStorageNodes();
FhgfsOpsErr retVal = FhgfsOpsErr_SUCCESS;
PathInfo pathInfo;
inode.getPathInfo(&pathInfo);
dynAttribs.resize(targetIDs->size());
// send request to each node and receive the response message
unsigned currentNodeIndex = 0;
for(UInt16VectorConstIter iter = targetIDs->begin();
iter != targetIDs->end();
iter++, currentNodeIndex++)
{
uint16_t targetID = *iter;
std::string entryID = entryInfo->getEntryID();
int64_t truncPos = getNodeLocalTruncPos(filesize, *pattern, currentNodeIndex);
TruncLocalFileMsg truncMsg(truncPos, entryID, targetID, &pathInfo);
if (useQuota)
truncMsg.setUserdataForQuota(inode.getUserID(), inode.getGroupID());
truncMsg.setMsgHeaderUserID(msgUserID);
RequestResponseArgs rrArgs(NULL, &truncMsg, NETMSGTYPE_TruncLocalFileResp);
RequestResponseTarget rrTarget(targetID, targetMapper, nodes);
rrTarget.setTargetStates(targetStates);
// send request to node and receive response
FhgfsOpsErr requestRes = MessagingTk::requestResponseTarget(&rrTarget, &rrArgs);
if(requestRes != FhgfsOpsErr_SUCCESS)
{ // communication error
LogContext(logContext).log(Log_WARNING,
"Communication with storage target failed: " + StringTk::uintToStr(targetID) + "; "
"fileID: " + inode.getEntryID());
if(retVal == FhgfsOpsErr_SUCCESS)
retVal = requestRes;
continue;
}
// correct response type received
TruncLocalFileRespMsg* truncRespMsg = (TruncLocalFileRespMsg*)rrArgs.outRespMsg.get();
// set current dynamic attribs (even if result not success, because then storageVersion==0)
DynamicFileAttribs currentDynAttribs(truncRespMsg->getStorageVersion(),
truncRespMsg->getFileSize(), truncRespMsg->getAllocedBlocks(),
truncRespMsg->getModificationTimeSecs(), truncRespMsg->getLastAccessTimeSecs() );
dynAttribs[currentNodeIndex] = currentDynAttribs;
FhgfsOpsErr chunkTruncRes = truncRespMsg->getResult();
if(chunkTruncRes != FhgfsOpsErr_SUCCESS)
{ // error: chunk file not truncated
LogContext(logContext).log(Log_WARNING,
"Storage target failed to truncate chunk file: " + StringTk::uintToStr(targetID) + "; "
"fileID: " + inode.getEntryID() + "; "
"Error: " + boost::lexical_cast<std::string>(chunkTruncRes));
if(retVal == FhgfsOpsErr_SUCCESS)
retVal = truncRespMsg->getResult();
continue;
}
// success: chunk file truncated
LOG_DEBUG(logContext, Log_DEBUG,
"Storage target truncated chunk file: " + StringTk::uintToStr(targetID) + "; "
"fileID: " + inode.getEntryID());
}
inode.setDynAttribs(dynAttribs); // the actual update
if(unlikely( (retVal != FhgfsOpsErr_SUCCESS) && (retVal != FhgfsOpsErr_TOOBIG) ) )
{
LogContext(logContext).log(Log_WARNING,
"Problems occurred during truncation of chunk files. "
"fileID: " + inode.getEntryID());
}
return retVal;
}
/**
* Note: This also updates dynamic file attribs.
*
* @param msgUserID only used for msg header info.
*/
FhgfsOpsErr MsgHelperTrunc::truncChunkFileParallel(FileInode& inode, EntryInfo* entryInfo,
int64_t filesize, bool useQuota, unsigned msgUserID, DynamicFileAttribsVec& dynAttribs)
{
const char* logContext = "Trunc chunk file helper";
App* app = Program::getApp();
MultiWorkQueue* slaveQ = app->getCommSlaveQueue();
StripePattern* pattern = inode.getStripePattern();
const UInt16Vector* targetIDs = pattern->getStripeTargetIDs();
size_t numTargetWorks = targetIDs->size();
FhgfsOpsErr retVal = FhgfsOpsErr_SUCCESS;
FhgfsOpsErrVec nodeResults(numTargetWorks);
SynchronizedCounter counter;
// generate work for storage targets...
PathInfo pathInfo;
inode.getPathInfo(&pathInfo);
dynAttribs.resize(numTargetWorks);
for(size_t i=0; i < numTargetWorks; i++)
{
int64_t localTruncPos = getNodeLocalTruncPos(filesize, *pattern, i);
TruncChunkFileWork* work = new TruncChunkFileWork(inode.getEntryID(), localTruncPos,
pattern, (*targetIDs)[i], &pathInfo, &dynAttribs[i], &(nodeResults[i]), &counter);
if (useQuota)
work->setUserdataForQuota(inode.getUserID(), inode.getGroupID());
work->setMsgUserID(msgUserID);
slaveQ->addDirectWork(work);
}
// wait for work completion...
counter.waitForCount(numTargetWorks);
// check target results...
for(size_t i=0; i < numTargetWorks; i++)
{
FhgfsOpsErr nodeResult = nodeResults[i];
if(unlikely(nodeResult != FhgfsOpsErr_SUCCESS) )
{
LogContext(logContext).log(Log_WARNING,
"Problems occurred during truncation of storage server chunk files. "
"File: " + inode.getEntryID());
retVal = nodeResult;
break;
}
}
inode.setDynAttribs(dynAttribs); // the actual update
return retVal;
}
/**
* Note: It only makes sense to call this before opening the file (because afterwards the filesize
* will be unknown).
*
* @param filesize desired trunc filesize
* @return true if current filesize is unknown or not equal to given filesize
*/
bool MsgHelperTrunc::isTruncChunkRequired(EntryInfo* entryInfo, int64_t filesize)
{
MetaStore* metaStore = Program::getApp()->getMetaStore();
StatData statData;
FhgfsOpsErr statRes = metaStore->stat(entryInfo, true, statData);
if( (statRes == FhgfsOpsErr_PATHNOTEXISTS) && (!filesize) )
return false; // does not exist => no zero-trunc necessary
if( (statRes == FhgfsOpsErr_SUCCESS) && (filesize == statData.getFileSize() ) )
return false; // exists and already has desired size
return true;
}
int64_t MsgHelperTrunc::getNodeLocalOffset(int64_t pos, int64_t chunkSize, size_t numNodes,
size_t stripeNodeIndex)
{
// (note: we can use "& ... -1" here instead of "%", because chunkSize is a power of two)
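// worked example (illustrative numbers only; here stripeNodeIndex is the target that holds pos):
// chunkSize=1024, numNodes=2, stripeNodeIndex=0, pos=2500 => posModChunkSize=452, chunkStart=2048,
// result=2048/2+452=1476, i.e. pos lies in (global) chunk 2, which is this target's second local
// chunk starting at local offset 1024, plus the 452 bytes into that chunk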
int64_t posModChunkSize = pos & (chunkSize - 1);
int64_t chunkStart = pos - posModChunkSize - (stripeNodeIndex*chunkSize);
return ( (chunkStart / numNodes) + posModChunkSize);
}
int64_t MsgHelperTrunc::getNodeLocalTruncPos(int64_t pos, StripePattern& pattern,
size_t stripeNodeIndex)
{
int64_t truncPos;
size_t numTargets = pattern.getStripeTargetIDs()->size();
size_t mainNodeIndex = pattern.getStripeTargetIndex(pos);
int64_t mainNodeLocalOffset = getNodeLocalOffset(
pos, pattern.getChunkSize(), numTargets, mainNodeIndex);
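// targets striped before the main (pos-owning) target keep their full chunk of the current stripe
// row, so they are truncated at the next local chunk boundary; the main target is cut exactly at
// its local offset; targets after it are cut at the start of the current local chunk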
if(stripeNodeIndex < mainNodeIndex)
truncPos = pattern.getNextChunkStart(mainNodeLocalOffset);
else
if(stripeNodeIndex == mainNodeIndex)
truncPos = mainNodeLocalOffset;
else
truncPos = pattern.getChunkStart(mainNodeLocalOffset);
truncPos = BEEGFS_MAX(truncPos, 0);
return truncPos;
}

View File

@@ -0,0 +1,33 @@
#pragma once
#include <common/Common.h>
#include <storage/MetaStore.h>
class MsgHelperTrunc
{
public:
static FhgfsOpsErr truncFile(EntryInfo* entryInfo, int64_t filesize,
bool useQuota, unsigned msgUserID, DynamicFileAttribsVec& dynAttribs);
static FhgfsOpsErr truncChunkFile(FileInode& inode, EntryInfo* entryInfo, int64_t filesize,
bool useQuota, unsigned userIDHint, DynamicFileAttribsVec& dynAttribs);
static bool isTruncChunkRequired(EntryInfo* entryInfo, int64_t filesize);
private:
MsgHelperTrunc() {}
static FhgfsOpsErr truncChunkFileSequential(FileInode& inode, EntryInfo* entryInfo,
int64_t filesize, bool useQuota, unsigned msgUserID, DynamicFileAttribsVec& dynAttribs);
static FhgfsOpsErr truncChunkFileParallel(FileInode& inode, EntryInfo* entryInfo,
int64_t filesize, bool useQuota, unsigned msgUserID, DynamicFileAttribsVec& dynAttribs);
static int64_t getNodeLocalOffset(int64_t pos, int64_t chunkSize, size_t numNodes,
size_t stripeNodeIndex);
static int64_t getNodeLocalTruncPos(int64_t pos, StripePattern& pattern,
size_t stripeNodeIndex);
public:
// inliners
};

View File

@@ -0,0 +1,287 @@
#include <common/toolkit/MessagingTk.h>
#include <common/net/message/storage/creating/UnlinkLocalFileMsg.h>
#include <common/net/message/storage/creating/UnlinkLocalFileRespMsg.h>
#include <components/ModificationEventFlusher.h>
#include <components/worker/UnlinkChunkFileWork.h>
#include <net/msghelpers/MsgHelperMkFile.h>
#include <program/Program.h>
#include "MsgHelperUnlink.h"
/**
* Wrapper for unlinkMetaFile() and unlinkChunkFiles().
*
* @param msgUserID only used in msg header info.
*/
FhgfsOpsErr MsgHelperUnlink::unlinkFile(DirInode& parentDir, const std::string& removeName,
unsigned msgUserID)
{
std::unique_ptr<FileInode> unlinkedInode;
unsigned numHardlinks; // Not used here!
FhgfsOpsErr unlinkMetaRes = unlinkMetaFile(parentDir, removeName, &unlinkedInode, numHardlinks);
/* note: if the file is still open or if there are/were hardlinks, then unlinkedInode will be
NULL even on FhgfsOpsErr_SUCCESS */
if (unlinkMetaRes == FhgfsOpsErr_SUCCESS && unlinkedInode)
unlinkMetaRes = unlinkChunkFiles(unlinkedInode.release(), msgUserID);
return unlinkMetaRes;
}
/**
* Unlink file in metadata store.
*
* @param outInitialHardlinkCount will be set to the initial hardlink count of the file
* inode before unlinking.
*
* @return if this returns success and outUnlinkedFile is set, then the caller also needs to unlink
* the chunk files via unlinkChunkFiles().
*/
FhgfsOpsErr MsgHelperUnlink::unlinkMetaFile(DirInode& parentDir,
const std::string& removeName, std::unique_ptr<FileInode>* outUnlinkedFile,
unsigned& outInitialHardlinkCount)
{
MetaStore* metaStore = Program::getApp()->getMetaStore();
ModificationEventFlusher* modEventFlusher = Program::getApp()->getModificationEventFlusher();
bool modEventLoggingEnabled = modEventFlusher->isLoggingEnabled();
EntryInfo entryInfo;
FhgfsOpsErr unlinkMetaRes = metaStore->unlinkFile(parentDir, removeName,
&entryInfo, outUnlinkedFile, outInitialHardlinkCount);
if (modEventLoggingEnabled)
{
std::string entryID = entryInfo.getEntryID();
modEventFlusher->add(ModificationEvent_FILEREMOVED, entryID);
}
return unlinkMetaRes;
}
/**
* Decrement the hardlink count and unlink the file's inode if the count reaches zero.
*
* @return FhgfsOpsErr_SUCCESS if the hardlink count was decremented successfully and, in case it
* reached zero, the file inode was removed as well (outUnlinkedFile will be set in that case and
* is later used to remove the chunk files). If the file is still in use while its last entry is
* being removed, the inode is linked into the disposal directory for later removal.
*
*/
FhgfsOpsErr MsgHelperUnlink::unlinkFileInode(EntryInfo* delFileInfo,
std::unique_ptr<FileInode>* outUnlinkedFile, unsigned& outInitialHardlinkCount)
{
MetaStore* metaStore = Program::getApp()->getMetaStore();
FhgfsOpsErr unlinkRes = metaStore->unlinkFileInode(delFileInfo, outUnlinkedFile,
outInitialHardlinkCount);
return unlinkRes;
}
/**
* Unlink (storage) chunk files.
*
* Note: If chunk files unlink fails, this method will create a disposal entry.
*
* @param unlinkedInode will be deleted inside this method or owned by another object, so caller
* may no longer access it after calling this.
* @param msgUserID only used in msg header info.
*/
FhgfsOpsErr MsgHelperUnlink::unlinkChunkFiles(FileInode* unlinkedInode, unsigned msgUserID)
{
const char* logContext = "Delete chunk files";
MetaStore* metaStore = Program::getApp()->getMetaStore();
FhgfsOpsErr retVal;
retVal = unlinkChunkFilesInternal(*unlinkedInode, msgUserID);
if(retVal != FhgfsOpsErr_SUCCESS)
{ /* Failed to unlink storage chunk files => add file to the disposable store to try
* again later. */
LogContext(logContext).logErr("Failed to delete all chunk files of ID: " +
unlinkedInode->getEntryID() + ". Added disposal entry.");
retVal = metaStore->insertDisposableFile(unlinkedInode); // destructs unlinkedInode
}
else
{ // success (local files unlinked)
delete(unlinkedInode);
}
return retVal;
}
/**
* Wrapper to decide parallel or sequential chunks unlink.
*
* @param msgUserID only used in msg header info.
*/
FhgfsOpsErr MsgHelperUnlink::unlinkChunkFilesInternal(FileInode& file, unsigned msgUserID)
{
StripePattern* pattern = file.getStripePattern();
if( (pattern->getStripeTargetIDs()->size() > 1) ||
(pattern->getPatternType() == StripePatternType_BuddyMirror) )
return unlinkChunkFileParallel(file, msgUserID);
else
return unlinkChunkFileSequential(file, msgUserID);
}
/**
* Note: This method does not work for mirrored files; use unlinkChunkFileParallel() for those.
*
* @param msgUserID only used in msg header info.
*/
FhgfsOpsErr MsgHelperUnlink::unlinkChunkFileSequential(FileInode& inode, unsigned msgUserID)
{
std::string logContext("Unlink Helper (unlink chunk file S [" + inode.getEntryID() + "])");
StripePattern* pattern = inode.getStripePattern();
const UInt16Vector* targetIDs = pattern->getStripeTargetIDs();
TargetMapper* targetMapper = Program::getApp()->getTargetMapper();
TargetStateStore* targetStates = Program::getApp()->getTargetStateStore();
NodeStore* nodes = Program::getApp()->getStorageNodes();
FhgfsOpsErr retVal = FhgfsOpsErr_SUCCESS;
std::string fileID(inode.getEntryID());
PathInfo pathInfo;
inode.getPathInfo(&pathInfo);
// send request to each node and receive the response message
for(UInt16VectorConstIter iter = targetIDs->begin();
iter != targetIDs->end();
iter++)
{
uint16_t targetID = *iter;
UnlinkLocalFileMsg unlinkMsg(fileID, targetID, &pathInfo);
unlinkMsg.setMsgHeaderUserID(msgUserID);
RequestResponseArgs rrArgs(NULL, &unlinkMsg, NETMSGTYPE_UnlinkLocalFileResp);
RequestResponseTarget rrTarget(targetID, targetMapper, nodes);
rrTarget.setTargetStates(targetStates);
// send request to node and receive response
FhgfsOpsErr requestRes = MessagingTk::requestResponseTarget(&rrTarget, &rrArgs);
if(requestRes != FhgfsOpsErr_SUCCESS)
{ // communication failed
if( (requestRes == FhgfsOpsErr_UNKNOWNNODE) ||
(requestRes == FhgfsOpsErr_UNKNOWNTARGET) )
{ /* special case: for unlink, we don't treat this as an error, to allow easy deletion of
files after intentional target removal. */
LogContext(logContext).log(Log_WARNING,
"Unable to resolve storage node targetID: " + StringTk::uintToStr(targetID) );
continue;
}
LogContext(logContext).log(Log_WARNING,
"Communication with storage target failed: " + StringTk::uintToStr(targetID) + "; "
"fileID: " + inode.getEntryID());
if(retVal == FhgfsOpsErr_SUCCESS)
retVal = requestRes;
continue;
}
// correct response type received
UnlinkLocalFileRespMsg* unlinkRespMsg = (UnlinkLocalFileRespMsg*)rrArgs.outRespMsg.get();
FhgfsOpsErr unlinkResult = unlinkRespMsg->getResult();
if(unlinkResult != FhgfsOpsErr_SUCCESS)
{ // error: local inode not unlinked
LogContext(logContext).log(Log_WARNING,
"Storage target failed to unlink chunk file: " + StringTk::uintToStr(targetID) + "; "
"fileID: " + inode.getEntryID());
if(retVal == FhgfsOpsErr_SUCCESS)
retVal = unlinkResult;
continue;
}
// success: local inode unlinked
LOG_DEBUG(logContext, Log_DEBUG,
"Storage targed unlinked chunk file: " + StringTk::uintToStr(targetID) + "; "
"fileID: " + inode.getEntryID());
}
if(unlikely(retVal != FhgfsOpsErr_SUCCESS) )
LogContext(logContext).log(Log_WARNING,
"Problems occurred during unlinking of the chunk files. "
"fileID: " + inode.getEntryID());
return retVal;
}
/**
* @param msgUserID only used in msg header info.
*/
FhgfsOpsErr MsgHelperUnlink::unlinkChunkFileParallel(FileInode& inode, unsigned msgUserID)
{
std::string logContext("Unlink Helper (unlink chunk file [" + inode.getEntryID() + "])");
App* app = Program::getApp();
MultiWorkQueue* slaveQ = app->getCommSlaveQueue();
StripePattern* pattern = inode.getStripePattern();
const UInt16Vector* targetIDs = pattern->getStripeTargetIDs();
size_t numTargetWorks = targetIDs->size();
FhgfsOpsErr retVal = FhgfsOpsErr_SUCCESS;
FhgfsOpsErrVec nodeResults(numTargetWorks);
SynchronizedCounter counter;
PathInfo pathInfo;
inode.getPathInfo(&pathInfo);
// generate work for storage targets...
for(size_t i=0; i < numTargetWorks; i++)
{
UnlinkChunkFileWork* work = new UnlinkChunkFileWork(inode.getEntryID(), pattern,
(*targetIDs)[i], &pathInfo, &(nodeResults[i]), &counter);
work->setMsgUserID(msgUserID);
slaveQ->addDirectWork(work);
}
// wait for work completion...
counter.waitForCount(numTargetWorks);
// check target results...
for(size_t i=0; i < numTargetWorks; i++)
{
if(unlikely(nodeResults[i] != FhgfsOpsErr_SUCCESS) )
{
if( (nodeResults[i] == FhgfsOpsErr_UNKNOWNNODE) ||
(nodeResults[i] == FhgfsOpsErr_UNKNOWNTARGET) )
{ /* we don't return this as an error to the user, because the node/target was probably
removed intentionally (and either way the rest of this inode is lost now) */
continue;
}
LogContext(logContext).log(Log_WARNING,
"Problems occurred during unlinking of chunk files.");
retVal = nodeResults[i];
goto error_exit;
}
}
error_exit:
return retVal;
}

View File

@@ -0,0 +1,32 @@
#pragma once
#include <common/storage/Path.h>
#include <common/Common.h>
#include <storage/MetaStore.h>
class MsgHelperUnlink
{
public:
static FhgfsOpsErr unlinkFile(DirInode& parentDir, const std::string& removeName,
unsigned msgUserID);
static FhgfsOpsErr unlinkMetaFile(DirInode& parentDir, const std::string& removeName,
std::unique_ptr<FileInode>* outUnlinkedFile, unsigned& outInitialHardlinkCount);
static FhgfsOpsErr unlinkFileInode(EntryInfo* delFileInfo,
std::unique_ptr<FileInode>* outUnlinkedFile, unsigned& outInitialHardlinkCount);
static FhgfsOpsErr unlinkChunkFiles(FileInode* unlinkedInode, unsigned msgUserID);
private:
MsgHelperUnlink() {}
static FhgfsOpsErr unlinkChunkFileSequential(FileInode& inode, unsigned msgUserID);
static FhgfsOpsErr unlinkChunkFileParallel(FileInode& inode, unsigned msgUserID);
static FhgfsOpsErr insertDisposableFile(FileInode& file);
public:
// inliners
static FhgfsOpsErr unlinkChunkFilesInternal(FileInode& file, unsigned msgUserID);
};

View File

@@ -0,0 +1,259 @@
#include <common/storage/EntryInfo.h>
#include <program/Program.h>
#include <storage/MetaStore.h>
#include <toolkit/XAttrTk.h>
#include "MsgHelperXAttr.h"
#include <sys/xattr.h>
const std::string MsgHelperXAttr::CURRENT_DIR_FILENAME = std::string(".");
const ssize_t MsgHelperXAttr::MAX_VALUE_SIZE = 60*1024;
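// upper bound for a single xattr value returned in one net message; getxattr() below rejects
// larger values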
std::pair<FhgfsOpsErr, StringVector> MsgHelperXAttr::listxattr(EntryInfo* entryInfo)
{
MetaStore* metaStore = Program::getApp()->getMetaStore();
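// non-inlined regular file inodes keep their xattrs on the file inode itself; in all other cases
// the (parent) directory inode is used, with entryInfo addressing the dentry (nullptr selects the
// directory's own xattrs)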
if (entryInfo->getEntryType() == DirEntryType_REGULARFILE && !entryInfo->getIsInlined())
{
auto [inode, referenceRes] = metaStore->referenceFile(entryInfo);
if (!inode)
return {referenceRes, {}};
auto result = inode->listXAttr();
metaStore->releaseFile(entryInfo->getParentEntryID(), inode);
return result;
}
DirInode* dir = metaStore->referenceDir(
DirEntryType_ISDIR(entryInfo->getEntryType())
? entryInfo->getEntryID()
: entryInfo->getParentEntryID(),
entryInfo->getIsBuddyMirrored(),
true);
if (!dir)
return {FhgfsOpsErr_INTERNAL, {}};
auto result = dir->listXAttr(
DirEntryType_ISDIR(entryInfo->getEntryType())
? nullptr
: entryInfo);
metaStore->releaseDir(dir->getID());
return result;
}
std::tuple<FhgfsOpsErr, std::vector<char>, ssize_t> MsgHelperXAttr::getxattr(EntryInfo* entryInfo,
const std::string& name, size_t maxSize)
{
std::tuple<FhgfsOpsErr, std::vector<char>, ssize_t> result;
MetaStore* metaStore = Program::getApp()->getMetaStore();
if (entryInfo->getEntryType() == DirEntryType_REGULARFILE && !entryInfo->getIsInlined())
{
auto [inode, referenceRes] = metaStore->referenceFile(entryInfo);
if (!inode)
return std::make_tuple(referenceRes, std::vector<char>(), ssize_t(0));
result = inode->getXAttr(name, maxSize);
metaStore->releaseFile(entryInfo->getParentEntryID(), inode);
}
else
{
DirInode* dir = metaStore->referenceDir(
DirEntryType_ISDIR(entryInfo->getEntryType())
? entryInfo->getEntryID()
: entryInfo->getParentEntryID(),
entryInfo->getIsBuddyMirrored(),
true);
if (!dir)
return std::make_tuple(FhgfsOpsErr_INTERNAL, std::vector<char>(), ssize_t(0));
result = dir->getXAttr(
DirEntryType_ISDIR(entryInfo->getEntryType())
? nullptr
: entryInfo,
name,
maxSize);
metaStore->releaseDir(dir->getID());
}
// Attribute might be too large for NetMessage.
if (std::get<1>(result).size() > size_t(MsgHelperXAttr::MAX_VALUE_SIZE))
{
// Note: This can happen if it was set with an older version of the client which did not
// include the size check.
return std::make_tuple(FhgfsOpsErr_INTERNAL, std::vector<char>(), ssize_t(0));
}
return result;
}
FhgfsOpsErr MsgHelperXAttr::removexattr(EntryInfo* entryInfo, const std::string& name)
{
MetaStore* metaStore = Program::getApp()->getMetaStore();
if (entryInfo->getEntryType() == DirEntryType_REGULARFILE && !entryInfo->getIsInlined())
{
auto [inode, referenceRes] = metaStore->referenceFile(entryInfo);
if (!inode)
return referenceRes;
auto result = inode->removeXAttr(entryInfo, name);
metaStore->releaseFile(entryInfo->getParentEntryID(), inode);
return result;
}
DirInode* dir = metaStore->referenceDir(
DirEntryType_ISDIR(entryInfo->getEntryType())
? entryInfo->getEntryID()
: entryInfo->getParentEntryID(),
entryInfo->getIsBuddyMirrored(),
true);
if (!dir)
return FhgfsOpsErr_INTERNAL;
auto result = dir->removeXAttr(
DirEntryType_ISDIR(entryInfo->getEntryType())
? nullptr
: entryInfo,
name);
metaStore->releaseDir(dir->getID());
return result;
}
FhgfsOpsErr MsgHelperXAttr::setxattr(EntryInfo* entryInfo, const std::string& name,
const CharVector& value, int flags)
{
MetaStore* metaStore = Program::getApp()->getMetaStore();
if (entryInfo->getEntryType() == DirEntryType_REGULARFILE && !entryInfo->getIsInlined())
{
auto [inode, referenceRes] = metaStore->referenceFile(entryInfo);
if (!inode)
return referenceRes;
auto result = inode->setXAttr(entryInfo, name, value, flags);
metaStore->releaseFile(entryInfo->getParentEntryID(), inode);
return result;
}
DirInode* dir = metaStore->referenceDir(
DirEntryType_ISDIR(entryInfo->getEntryType())
? entryInfo->getEntryID()
: entryInfo->getParentEntryID(),
entryInfo->getIsBuddyMirrored(),
true);
if (!dir)
return FhgfsOpsErr_INTERNAL;
auto result = dir->setXAttr(
DirEntryType_ISDIR(entryInfo->getEntryType())
? nullptr
: entryInfo,
name,
value,
flags);
metaStore->releaseDir(dir->getID());
return result;
}
FhgfsOpsErr MsgHelperXAttr::StreamXAttrState::streamXattrFn(Socket* socket, void* context)
{
StreamXAttrState* state = static_cast<StreamXAttrState*>(context);
return state->streamXattr(socket);
}
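/* Wire format produced by streamXattr() below, one record per xattr name:
 *   uint32 nameLen (LE), nameLen bytes of name, uint64 valueLen (LE), valueLen bytes of value.
 * The stream is terminated by a uint32 value 0; a uint32 value of -1 aborts the stream if fetching
 * an attribute fails. readNextXAttr() is the matching receiver. */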
FhgfsOpsErr MsgHelperXAttr::StreamXAttrState::streamXattr(Socket* socket) const
{
for (auto xattr = names.cbegin(); xattr != names.cend(); ++xattr)
{
const auto& name = *xattr;
CharVector value;
FhgfsOpsErr getRes;
if (entryInfo)
std::tie(getRes, value, std::ignore) = getxattr(entryInfo, name, XATTR_SIZE_MAX);
else
std::tie(getRes, value, std::ignore) = XAttrTk::getUserXAttr(path, name, XATTR_SIZE_MAX);
if (getRes != FhgfsOpsErr_SUCCESS)
{
uint32_t endMark = HOST_TO_LE_32(-1);
socket->send(&endMark, sizeof(endMark), 0);
return getRes;
}
uint32_t nameLen = HOST_TO_LE_32(name.size());
socket->send(&nameLen, sizeof(nameLen), 0);
socket->send(&name[0], nameLen, 0);
uint64_t valueLen = HOST_TO_LE_64(value.size());
socket->send(&valueLen, sizeof(valueLen), 0);
socket->send(&value[0], value.size(), 0);
}
uint32_t endMark = HOST_TO_LE_32(0);
socket->send(&endMark, sizeof(endMark), 0);
return FhgfsOpsErr_SUCCESS;
}
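/* Receiver counterpart to streamXattr(): returns FhgfsOpsErr_AGAIN after each successfully read
 * (name, value) pair, FhgfsOpsErr_SUCCESS when the 0 end mark is seen, FhgfsOpsErr_COMMUNICATION
 * on recv errors or the -1 error mark, and FhgfsOpsErr_RANGE if name or value exceed the XATTR
 * size limits. */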
FhgfsOpsErr MsgHelperXAttr::StreamXAttrState::readNextXAttr(Socket* socket, std::string& name,
CharVector& value)
{
uint32_t nameLen;
Config* cfg = Program::getApp()->getConfig();
ssize_t nameLenRes = socket->recvExactT(&nameLen, sizeof(nameLen), 0, cfg->getConnMsgShortTimeout());
if (nameLenRes < 0 || size_t(nameLenRes) < sizeof(nameLen))
return FhgfsOpsErr_COMMUNICATION;
nameLen = LE_TO_HOST_32(nameLen);
if (nameLen == 0)
return FhgfsOpsErr_SUCCESS;
else if (nameLen == uint32_t(-1))
return FhgfsOpsErr_COMMUNICATION;
if (nameLen > XATTR_NAME_MAX)
return FhgfsOpsErr_RANGE;
name.resize(nameLen);
if (socket->recvExactT(&name[0], nameLen, 0, cfg->getConnMsgShortTimeout()) != (ssize_t) name.size())
return FhgfsOpsErr_COMMUNICATION;
uint64_t valueLen;
ssize_t valueLenRes = socket->recvExactT(&valueLen, sizeof(valueLen), 0,
cfg->getConnMsgShortTimeout());
if (valueLenRes < 0 || size_t(valueLenRes) != sizeof(valueLen))
return FhgfsOpsErr_COMMUNICATION;
valueLen = LE_TO_HOST_64(valueLen);
if (valueLen > XATTR_SIZE_MAX)
return FhgfsOpsErr_RANGE;
value.resize(valueLen);
if (socket->recvExactT(&value[0], valueLen, 0, cfg->getConnMsgShortTimeout()) != ssize_t(valueLen))
return FhgfsOpsErr_COMMUNICATION;
return FhgfsOpsErr_AGAIN;
}

View File

@@ -0,0 +1,48 @@
#pragma once
#include <common/storage/StorageErrors.h>
class EntryInfo; // forward declaration
class Socket;
class MsgHelperXAttr
{
public:
static std::pair<FhgfsOpsErr, StringVector> listxattr(EntryInfo* entryInfo);
static std::tuple<FhgfsOpsErr, std::vector<char>, ssize_t> getxattr(EntryInfo* entryInfo,
const std::string& name, size_t maxSize);
static FhgfsOpsErr removexattr(EntryInfo* entryInfo, const std::string& name);
static FhgfsOpsErr setxattr(EntryInfo* entryInfo, const std::string& name,
const CharVector& value, int flags);
static const std::string CURRENT_DIR_FILENAME;
static const ssize_t MAX_VALUE_SIZE;
class StreamXAttrState
{
public:
StreamXAttrState():
entryInfo(nullptr)
{}
StreamXAttrState(EntryInfo& entryInfo, StringVector names):
entryInfo(&entryInfo), names(std::move(names))
{}
StreamXAttrState(std::string path, StringVector names):
entryInfo(nullptr), path(std::move(path)), names(std::move(names))
{}
static FhgfsOpsErr streamXattrFn(Socket* socket, void* context);
static FhgfsOpsErr readNextXAttr(Socket* socket, std::string& name, CharVector& value);
private:
EntryInfo* entryInfo;
std::string path;
StringVector names;
FhgfsOpsErr streamXattr(Socket* socket) const;
};
};