New upstream version 8.1.0

geos_one
2025-08-10 01:34:16 +02:00
commit c891bb7105
4398 changed files with 838833 additions and 0 deletions


@@ -0,0 +1,348 @@
#include "Cassandra.h"
#include <common/storage/StorageTargetInfo.h>
#include <common/toolkit/StringTk.h>
#include <exception/DatabaseException.h>
#include <chrono>
#include <thread>
static const std::string libVersion = "2.9";
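// Look up a symbol in the dynamically loaded client library and return it wrapped in a
// std::function with the signature of the matching cass_* declaration (passed in via
// decltype). Throws std::runtime_error if the symbol cannot be resolved, which usually
// indicates an incompatible library version.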
template<typename T>
std::function<T> loadSymbol(void* libHandle, const char* name)
{
dlerror();
auto f = dlsym(libHandle, name);
const char* error = dlerror();
if (error != NULL)
throw std::runtime_error("Couldn't load symbol: " + std::string(error)
+ "\nThe cassandra plugin requires the datastax client library version " + libVersion
+ ".");
return reinterpret_cast<T(*)>(f);
}
Cassandra::Cassandra(Config config) :
cluster(nullptr, [this](CassCluster* c){cluster_free(c);}),
session(nullptr, [this](CassSession* s){session_free(s);}),
batch(nullptr, [this](CassBatch* b){batch_free(b);}),
config(std::move(config)),
libHandle(nullptr, dlclose),
numQueries(0)
{
// Load datastax cassandra library
dlerror();
libHandle.reset(dlopen("libcassandra.so", RTLD_NOW));
const char* error = dlerror();
if (libHandle == NULL || error != NULL)
{
throw std::runtime_error("Couldn't load cassandra client library (libcassandra.so): "
+ std::string(error) + "\nThe cassandra plugin requires the datastax client library"
+ " version " + libVersion + ".");
}
// load used symbols
cluster_new = loadSymbol<decltype(cass_cluster_new)>(
libHandle.get(), "cass_cluster_new");
cluster_free = loadSymbol<decltype(cass_cluster_free)>(
libHandle.get(), "cass_cluster_free");
session_new = loadSymbol<decltype(cass_session_new)>(
libHandle.get(), "cass_session_new");
session_free = loadSymbol<decltype(cass_session_free)>(
libHandle.get(), "cass_session_free");
batch_new = loadSymbol<decltype(cass_batch_new)>(
libHandle.get(), "cass_batch_new");
batch_free = loadSymbol<decltype(cass_batch_free)>(
libHandle.get(), "cass_batch_free");
batch_add_statement = loadSymbol<decltype(cass_batch_add_statement)>(
libHandle.get(), "cass_batch_add_statement");
cluster_set_contact_points = loadSymbol<decltype(cass_cluster_set_contact_points)>(
libHandle.get(), "cass_cluster_set_contact_points");
cluster_set_port = loadSymbol<decltype(cass_cluster_set_port)>(
libHandle.get(), "cass_cluster_set_port");
session_connect = loadSymbol<decltype(cass_session_connect)>(
libHandle.get(), "cass_session_connect");
session_execute = loadSymbol<decltype(cass_session_execute)>(
libHandle.get(), "cass_session_execute");
session_execute_batch = loadSymbol<decltype(cass_session_execute_batch)>(
libHandle.get(), "cass_session_execute_batch");
future_error_code = loadSymbol<decltype(cass_future_error_code)>(
libHandle.get(), "cass_future_error_code");
future_error_message = loadSymbol<decltype(cass_future_error_message)>(
libHandle.get(), "cass_future_error_message");
future_free = loadSymbol<decltype(cass_future_free)>(
libHandle.get(), "cass_future_free");
statement_new = loadSymbol<decltype(cass_statement_new)>(
libHandle.get(), "cass_statement_new");
statement_free = loadSymbol<decltype(cass_statement_free)>(
libHandle.get(), "cass_statement_free");
cluster.reset(cluster_new());
session.reset(session_new());
batch.reset(batch_new(CASS_BATCH_TYPE_LOGGED));
cluster_set_contact_points(cluster.get(), this->config.host.c_str());
cluster_set_port(cluster.get(), this->config.port);
unsigned tries = 0;
while (true)
{
auto connectFuture = std::unique_ptr<CassFuture, decltype(future_free)>(
session_connect(session.get(), cluster.get()), future_free);
CassError err = future_error_code(connectFuture.get());
if (err == CASS_OK)
break;
const char* message;
size_t length;
future_error_message(connectFuture.get(), &message, &length);
LOG(DATABASE, ERR, "Couldn't connect to cassandra database: " + std::string(message));
tries++;
if (tries >= connectionRetries)
throw DatabaseException("Connection to cassandra database failed.");
else
LOG(DATABASE, WARNING, "Retrying in 10 seconds.");
std::this_thread::sleep_for(std::chrono::seconds(10));
}
// Create and switch to keyspace
query("CREATE KEYSPACE IF NOT EXISTS " + this->config.database + " WITH "
+ "replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};");
query("USE " + this->config.database + ";");
// Create tables
query("CREATE TABLE IF NOT EXISTS meta ("
"time timestamp, nodeNumID int, nodeID varchar, isResponding boolean, "
"indirectWorkListSize int, directWorkListSize int, PRIMARY KEY(time, nodeNumID));");
query("CREATE TABLE IF NOT EXISTS highResMeta ("
"time timestamp, nodeNumID int, nodeID varchar, workRequests int, "
"queuedRequests int, netSendBytes int, netRecvBytes int, PRIMARY KEY(time, nodeNumID));");
query("CREATE TABLE IF NOT EXISTS storage ("
"time timestamp, nodeNumID int, nodeID varchar, isResponding boolean, "
"indirectWorkListSize int, directWorkListSize int, "
"diskSpaceTotal bigint, diskSpaceFree bigint, PRIMARY KEY(time, nodeNumID));");
query("CREATE TABLE IF NOT EXISTS highResStorage ("
"time timestamp, nodeNumID int, nodeID varchar, workRequests int, "
"queuedRequests int, diskWriteBytes int, diskReadBytes int, "
"netSendBytes int, netRecvBytes int, PRIMARY KEY(time, nodeNumID));");
query("CREATE TABLE IF NOT EXISTS storageTargetData ("
"time timestamp, nodeNumID int, nodeID varchar, storageTargetID int, "
"diskSpaceTotal bigint, diskSpaceFree bigint, inodesTotal int, inodesFree int, "
"PRIMARY KEY(time, nodeNumID));");
query("CREATE TABLE IF NOT EXISTS metaClientOpsByNode ("
"time timestamp, node varchar, ops map<varchar,int> ,"
"PRIMARY KEY(time, node));");
query("CREATE TABLE IF NOT EXISTS storageClientOpsByNode ("
"time timestamp, node varchar, ops map<varchar,int> ,"
"PRIMARY KEY(time, node));");
query("CREATE TABLE IF NOT EXISTS metaClientOpsByUser ("
"time timestamp, user varchar, ops map<varchar,int> ,"
"PRIMARY KEY(time, user));");
query("CREATE TABLE IF NOT EXISTS storageClientOpsByUser ("
"time timestamp, user varchar, ops map<varchar,int> ,"
"PRIMARY KEY(time, user));");
}
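// Run a single CQL statement directly (outside the batch); used above for keyspace and
// table setup. If waitForResult is set, block on the result and throw DatabaseException
// on failure.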
void Cassandra::query(const std::string& query, bool waitForResult)
{
CassStatement* statement = statement_new(query.c_str(), 0);
auto queryFuture = std::unique_ptr<CassFuture, decltype(future_free)>(
session_execute(session.get(), statement), future_free);
statement_free(statement);
if (waitForResult)
{
CassError result = future_error_code(queryFuture.get());
if (result != CASS_OK)
{
const char* message;
size_t length;
future_error_message(queryFuture.get(), &message, &length);
throw DatabaseException("Query '" + query + "' failed: " + std::string(message));
}
}
}
void Cassandra::insertMetaNodeData(std::shared_ptr<Node> node, const MetaNodeDataContent& data)
{
std::ostringstream statement;
statement << "INSERT INTO meta ";
statement << "(time, nodeNumID, nodeID, isResponding";
if (data.isResponding)
statement << ", indirectWorkListSize, directWorkListSize) ";
else
statement << ") ";
statement << "VALUES (";
statement << "TOTIMESTAMP(NOW()), " << node->getNumID() << ", '" << node->getAlias() << "', ";
statement << std::boolalpha << data.isResponding;
if (data.isResponding)
statement << ", " << data.indirectWorkListSize << ", " << data.directWorkListSize << ") ";
else
statement << ") ";
statement << "USING TTL " << config.TTLSecs << ";";
appendQuery(statement.str());
}
void Cassandra::insertStorageNodeData(std::shared_ptr<Node> node,
const StorageNodeDataContent& data)
{
std::ostringstream statement;
statement << "INSERT INTO storage ";
statement << "(time, nodeNumID, nodeID, isResponding";
if (data.isResponding)
statement << ", indirectWorkListSize, directWorkListSize, diskSpaceTotal, diskSpaceFree) ";
else
statement << ") ";
statement << "VALUES (";
statement << "TOTIMESTAMP(NOW()), " << node->getNumID() << ", '" << node->getAlias() << "', ";
statement << std::boolalpha << data.isResponding;
if (data.isResponding)
statement << ", " << data.indirectWorkListSize << ", " << data.directWorkListSize << ", "
<< data.diskSpaceTotal << ", " << data.diskSpaceFree << ") ";
else
statement << ") ";
statement << "USING TTL " << config.TTLSecs << ";";
appendQuery(statement.str());
}
void Cassandra::insertHighResMetaNodeData(std::shared_ptr<Node> node,
const HighResolutionStats& data)
{
std::ostringstream statement;
statement << "INSERT INTO highResMeta ";
statement << "(time, nodeNumID, nodeID, workRequests, ";
statement << "queuedRequests, netSendBytes, netRecvBytes) VALUES (";
statement << data.rawVals.statsTimeMS << ", " << node->getNumID() << ", '" << node->getAlias() << "', ";
statement << data.incVals.workRequests << ", " << data.rawVals.queuedRequests << ", ";
statement << data.incVals.netSendBytes << ", " << data.incVals.netRecvBytes << ") ";
statement << "USING TTL " << config.TTLSecs << ";";
appendQuery(statement.str());
}
void Cassandra::insertHighResStorageNodeData(std::shared_ptr<Node> node,
const HighResolutionStats& data)
{
std::ostringstream statement;
statement << "INSERT INTO highResStorage ";
statement << "(time, nodeNumID, nodeID, workRequests, ";
statement << "queuedRequests, diskWriteBytes, diskReadBytes, netSendBytes, netRecvBytes) VALUES (";
statement << data.rawVals.statsTimeMS << ", " << node->getNumID() << ", '" << node->getAlias() << "', ";
statement << data.incVals.workRequests << ", " << data.rawVals.queuedRequests << ", ";
statement << data.incVals.diskWriteBytes << ", " << data.incVals.diskReadBytes << ", ";
statement << data.incVals.netSendBytes << ", " << data.incVals.netRecvBytes << ") ";
statement << "USING TTL " << config.TTLSecs << ";";
appendQuery(statement.str());
}
void Cassandra::insertStorageTargetsData(std::shared_ptr<Node> node,
const StorageTargetInfo& data)
{
std::ostringstream statement;
statement << "INSERT INTO storageTargetData ";
statement << "(time, nodeNumID, nodeID, storageTargetID, ";
statement << "diskSpaceTotal, diskSpaceFree, inodesTotal, inodesFree) VALUES (";
statement << "TOTIMESTAMP(NOW()), " << node->getNumID() << ", '" << node->getAlias() << "', ";
statement << data.getTargetID() << ", ";
statement << data.getDiskSpaceTotal() << ", " << data.getDiskSpaceFree() << ", ";
statement << data.getInodesTotal() << ", " << data.getInodesFree() << ") ";
statement << "USING TTL " << config.TTLSecs << ";";
appendQuery(statement.str());
}
void Cassandra::insertClientNodeData(const std::string& id, const NodeType nodeType,
const std::map<std::string, uint64_t>& opMap, bool perUser)
{
std::ostringstream statement;
statement << "INSERT INTO ";
if (perUser)
{
if (nodeType == NODETYPE_Meta)
statement << "metaClientOpsByUser";
else if (nodeType == NODETYPE_Storage)
statement << "storageClientOpsByUser";
else
throw DatabaseException("Invalid Nodetype given.");
statement << " (time, user, ops) VALUES (";
}
else
{
if (nodeType == NODETYPE_Meta)
statement << "metaClientOpsByNode";
else if (nodeType == NODETYPE_Storage)
statement << "storageClientOpsByNode";
else
throw DatabaseException("Invalid Nodetype given.");
statement << " (time, node, ops) VALUES (";
}
statement << "TOTIMESTAMP(NOW()), '" << id << "', {";
bool first = true;
for (auto iter = opMap.begin(); iter != opMap.end(); iter++)
{
if (iter->second == 0)
continue;
statement << (first ? "" : ",") << "'" << iter->first << "':" << iter->second;
first = false;
}
statement << "}) USING TTL " << config.TTLSecs << ";";
// If all op counters are zero, don't write anything
if (!first)
appendQuery(statement.str());
}
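// Add one statement to the current batch (under queryMutex); once maxInsertsPerBatch
// statements have accumulated, the batch is flushed.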
void Cassandra::appendQuery(const std::string& query)
{
const std::lock_guard<Mutex> lock(queryMutex);
CassStatement* statement = statement_new(query.c_str(), 0);
batch_add_statement(batch.get(), statement);
statement_free(statement);
numQueries++;
if (numQueries >= config.maxInsertsPerBatch)
{
writeUnlocked();
}
}
void Cassandra::write()
{
const std::lock_guard<Mutex> lock(queryMutex);
if(numQueries)
writeUnlocked();
}
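// Send the accumulated batch and start a fresh one. The returned future is freed
// without inspecting its error code, so failed batches are not reported here.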
void Cassandra::writeUnlocked()
{
CassFuture* batchFuture = session_execute_batch(session.get(), batch.get());
batch.reset(batch_new(CASS_BATCH_TYPE_LOGGED));
future_free(batchFuture);
LOG(DATABASE, DEBUG, "Sent queries to Cassandra.", numQueries);
numQueries = 0;
}
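
A minimal construction sketch for the class above; all configuration values are placeholders:

Cassandra::Config cfg;
cfg.host = "127.0.0.1";          // contact point (placeholder)
cfg.port = 9042;                 // CQL native protocol port (placeholder)
cfg.database = "beegfs_mon";     // keyspace name (placeholder)
cfg.maxInsertsPerBatch = 25;     // flush threshold for the logged batch (placeholder)
cfg.TTLSecs = 86400;             // per-row TTL in seconds (placeholder)
Cassandra db(std::move(cfg));    // loads libcassandra.so, connects, creates keyspace and tables
// The insert*() overrides buffer statements; write() flushes whatever is still batched.
db.write();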


@@ -0,0 +1,80 @@
#ifndef CASSANDRA_H_
#define CASSANDRA_H_
#include <common/nodes/NodeType.h>
#include <common/threading/Mutex.h>
#include <nodes/MetaNodeEx.h>
#include <nodes/StorageNodeEx.h>
#include <misc/TSDatabase.h>
#include <cassandra.h>
#include <dlfcn.h>
class Cassandra : public TSDatabase
{
public:
struct Config
{
std::string host;
int port;
std::string database;
unsigned maxInsertsPerBatch;
unsigned TTLSecs;
};
Cassandra(Config config);
virtual ~Cassandra() {};
virtual void insertMetaNodeData(
std::shared_ptr<Node> node, const MetaNodeDataContent& data) override;
virtual void insertStorageNodeData(
std::shared_ptr<Node> node, const StorageNodeDataContent& data) override;
virtual void insertHighResMetaNodeData(
std::shared_ptr<Node> node, const HighResolutionStats& data) override;
virtual void insertHighResStorageNodeData(
std::shared_ptr<Node> node, const HighResolutionStats& data) override;
virtual void insertStorageTargetsData(
std::shared_ptr<Node> node, const StorageTargetInfo& data) override;
virtual void insertClientNodeData(
const std::string& id, const NodeType nodeType,
const std::map<std::string, uint64_t>& opMap, bool perUser) override;
virtual void write() override;
private:
std::function<decltype(cass_cluster_new)> cluster_new;
std::function<decltype(cass_cluster_free)> cluster_free;
std::function<decltype(cass_session_new)> session_new;
std::function<decltype(cass_session_free)> session_free;
std::function<decltype(cass_batch_new)> batch_new;
std::function<decltype(cass_batch_free)> batch_free;
std::function<decltype(cass_batch_add_statement)> batch_add_statement;
std::function<decltype(cass_cluster_set_contact_points)> cluster_set_contact_points;
std::function<decltype(cass_cluster_set_port)> cluster_set_port;
std::function<decltype(cass_session_connect)> session_connect;
std::function<decltype(cass_session_execute)> session_execute;
std::function<decltype(cass_session_execute_batch)> session_execute_batch;
std::function<decltype(cass_future_error_code)> future_error_code;
std::function<decltype(cass_future_error_message)> future_error_message;
std::function<decltype(cass_future_free)> future_free;
std::function<decltype(cass_statement_new)> statement_new;
std::function<decltype(cass_statement_free)> statement_free;
std::unique_ptr<CassCluster, decltype(cluster_free)> cluster;
std::unique_ptr<CassSession, decltype(session_free)> session;
std::unique_ptr<CassBatch, decltype(batch_free)> batch;
const Config config;
std::unique_ptr<void, int(*)(void*)> libHandle;
std::string queryBuffer;
unsigned numQueries;
mutable Mutex queryMutex;
void appendQuery(const std::string& query);
void query(const std::string& query, bool waitForResult = true);
void writeUnlocked();
};
#endif


@@ -0,0 +1,153 @@
#include "CurlWrapper.h"
#include <exception/CurlException.h>
CurlWrapper::CurlWrapper(std::chrono::milliseconds timeout, bool checkSSLCertificates) :
curlHandle(curl_easy_init(), &curl_easy_cleanup)
{
if (curlHandle.get() == NULL)
throw CurlException("Curl init failed.");
if (curl_easy_setopt(curlHandle.get(), CURLOPT_ERRORBUFFER, &errorBuffer) != CURLE_OK)
throw CurlException("Setting Curl error buffer failed.");
if (curl_easy_setopt(curlHandle.get(), CURLOPT_NOSIGNAL, 1L) != CURLE_OK)
throw CurlException(errorBuffer);
if (curl_easy_setopt(curlHandle.get(), CURLOPT_TIMEOUT_MS,
std::chrono::milliseconds(timeout).count()) != CURLE_OK)
throw CurlException(errorBuffer);
if (curl_easy_setopt(curlHandle.get(), CURLOPT_WRITEFUNCTION, writeCallback) != CURLE_OK)
throw CurlException(errorBuffer);
if (curl_easy_setopt(curlHandle.get(), CURLOPT_WRITEDATA, static_cast<void*>(this)) != CURLE_OK)
throw CurlException(errorBuffer);
if (curl_easy_setopt(curlHandle.get(), CURLOPT_CONNECTTIMEOUT_MS,
timeout.count()) != CURLE_OK)
throw CurlException(errorBuffer);
if (!checkSSLCertificates)
{
if (curl_easy_setopt(curlHandle.get(), CURLOPT_SSL_VERIFYPEER, 0) != CURLE_OK)
throw CurlException(errorBuffer);
if (curl_easy_setopt(curlHandle.get(), CURLOPT_SSL_VERIFYHOST, 0) != CURLE_OK)
throw CurlException(errorBuffer);
}
}
void CurlWrapper::enableHttpAuth(const std::string& user, const std::string& password)
{
if (curl_easy_setopt(curlHandle.get(), CURLOPT_HTTPAUTH, CURLAUTH_ANY))
throw CurlException(errorBuffer);
if (curl_easy_setopt(curlHandle.get(), CURLOPT_USERNAME, user.c_str()))
throw CurlException(errorBuffer);
if (curl_easy_setopt(curlHandle.get(), CURLOPT_PASSWORD, password.c_str()))
throw CurlException(errorBuffer);
}
unsigned short CurlWrapper::sendGetRequest(const std::string& url, const ParameterMap& parameters)
{
std::string parameterStr = makeParameterStr(parameters);
if (curl_easy_setopt(curlHandle.get(), CURLOPT_URL, (url + parameterStr).c_str()) != CURLE_OK)
throw CurlException(errorBuffer);
if (curl_easy_setopt(curlHandle.get(), CURLOPT_HTTPGET, 1L) != CURLE_OK)
throw CurlException(errorBuffer);
// replace with curl_multi_perform?
if (curl_easy_perform(curlHandle.get()) != CURLE_OK)
throw CurlException(errorBuffer);
long responseCode;
if (curl_easy_getinfo(curlHandle.get(), CURLINFO_RESPONSE_CODE, &responseCode) != CURLE_OK)
throw CurlException(errorBuffer);
return responseCode;
}
unsigned short CurlWrapper::sendPostRequest(const std::string& url, const char* data,
const ParameterMap& parameters, const std::vector<std::string>& headers)
{
std::string parameterStr = makeParameterStr(parameters);
if (curl_easy_setopt(curlHandle.get(), CURLOPT_URL, (url + parameterStr).c_str()) != CURLE_OK)
throw CurlException(errorBuffer);
if (curl_easy_setopt(curlHandle.get(), CURLOPT_POSTFIELDS, data) != CURLE_OK)
throw CurlException(errorBuffer);
struct curl_slist* headerList = nullptr;
for (const auto& header : headers) {
headerList = curl_slist_append(headerList, header.c_str());
}
if (curl_easy_setopt(curlHandle.get(), CURLOPT_HTTPHEADER, headerList) != CURLE_OK)
throw CurlException(errorBuffer);
// replace with curl_multi_perform?
if (curl_easy_perform(curlHandle.get()) != CURLE_OK)
throw CurlException(errorBuffer);
long responseCode;
if (curl_easy_getinfo(curlHandle.get(), CURLINFO_RESPONSE_CODE, &responseCode) != CURLE_OK)
throw CurlException(errorBuffer);
return responseCode;
}
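// Build the URL query string ("?key=value&..."), percent-encoding keys and values with
// curl_easy_escape; returns an empty string if there are no parameters.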
std::string CurlWrapper::makeParameterStr(const ParameterMap& parameters) const
{
if (!parameters.empty())
{
std::string parameterStr = "?";
for (auto iter = parameters.begin(); iter != parameters.end(); iter++)
{
{
auto escaped = std::unique_ptr<char, void(*)(void*)> (
curl_easy_escape(curlHandle.get(), (iter->first).c_str(),0),
&curl_free);
if (!escaped)
throw CurlException(errorBuffer);
parameterStr += escaped.get();
}
{
auto escaped = std::unique_ptr<char, void(*)(void*)> (
curl_easy_escape(curlHandle.get(), (iter->second).c_str(),0),
&curl_free);
if (!escaped)
throw CurlException(errorBuffer);
parameterStr += "=";
parameterStr += escaped.get();
parameterStr += "&";
}
}
parameterStr.resize(parameterStr.size() - 1);
return parameterStr;
}
return {};
}
size_t CurlWrapper::writeCallback(char *ptr, size_t size, size_t nmemb, void *userdata)
{
auto instance = static_cast<CurlWrapper*>(userdata);
instance->setResponse(std::string(ptr, size*nmemb));
// Always signal success by returning the full byte count handed in by libcurl
return size*nmemb;
}
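
A minimal sketch of driving the wrapper directly; the URL, the parameter, and processBody() are placeholders, and transport failures surface as CurlException:

CurlWrapper curl(std::chrono::milliseconds(5000), true);   // 5 s timeout, verify SSL certificates
CurlWrapper::ParameterMap params;
params["q"] = "SHOW DATABASES";                            // placeholder query parameter
unsigned short status = curl.sendGetRequest("http://localhost:8086/query", params);
if (status >= 200 && status < 300)
processBody(curl.getResponse());   // processBody() is hypothetical; getResponse() returns the body written by writeCallback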


@@ -0,0 +1,57 @@
#ifndef CURL_WRAPPER_H_
#define CURL_WRAPPER_H_
#include <common/threading/Mutex.h>
#include <curl/curl.h>
#include <chrono>
#include <mutex>
#include <unordered_map>
class CurlWrapper
{
public:
CurlWrapper(std::chrono::milliseconds timeout, bool checkSSLCertificates);
CurlWrapper(const CurlWrapper&) = delete;
CurlWrapper& operator=(const CurlWrapper&) = delete;
CurlWrapper(CurlWrapper&&) = delete;
CurlWrapper& operator=(CurlWrapper&&) = delete;
~CurlWrapper() = default;
void enableHttpAuth(const std::string& user, const std::string& password);
typedef std::unordered_map<std::string, std::string> ParameterMap;
unsigned short sendGetRequest(const std::string& url,
const ParameterMap& parameters);
unsigned short sendPostRequest(const std::string& url, const char* data,
const ParameterMap& parameters, const std::vector<std::string>& headers);
static size_t writeCallback(char *ptr, size_t size, size_t nmemb, void *userdata);
protected:
std::unique_ptr<CURL, void(*)(void*)> curlHandle;
std::string response;
char errorBuffer[CURL_ERROR_SIZE];
std::string makeParameterStr(const ParameterMap& parameters) const;
void setResponse(const std::string& response)
{
this->response = response;
}
public:
const std::string& getResponse() const
{
return response;
}
};
#endif


@@ -0,0 +1,344 @@
#include "InfluxDB.h"
#include <common/storage/StorageTargetInfo.h>
#include <common/toolkit/StringTk.h>
#include <exception/DatabaseException.h>
#include <exception/CurlException.h>
#include <thread>
#include <chrono>
#include <boost/algorithm/string/replace.hpp>
static const std::string retentionPolicyName = "auto";
InfluxDB::InfluxDB(Config cfg) :
config(std::move(cfg))
{
curlWrapper = boost::make_unique<CurlWrapper>(config.httpTimeout, config.curlCheckSSLCertificates);
if (config.dbVersion == INFLUXDB)
{
if (!config.username.empty())
curlWrapper->enableHttpAuth(config.username, config.password);
setupDatabase();
}
}
void InfluxDB::setupDatabase() const
{
// Wait for the InfluxDB service to become available
unsigned tries = 0;
while(!sendPing())
{
tries++;
LOG(DATABASE, ERR, "Coudn't reach InfluxDB service.");
if (tries >= connectionRetries)
throw DatabaseException("Connection to InfluxDB failed.");
else
LOG(DATABASE, WARNING, "Retrying in 10 seconds.");
std::this_thread::sleep_for(std::chrono::seconds(10));
}
// These queries run every time the service starts, but InfluxDB ignores them if
// the database and retention policy already exist
sendQuery("create database " + config.database);
if (config.setRetentionPolicy)
{
sendQuery("create retention policy " + retentionPolicyName + " on " + config.database
+ " duration " + config.retentionDuration
+ " replication 1 default");
}
}
void InfluxDB::insertMetaNodeData(std::shared_ptr<Node> node, const MetaNodeDataContent& data)
{
std::ostringstream point;
point << "meta";
point << ",nodeID=" << escapeStringForWrite(node->getAlias());
point << ",nodeNumID=" << node->getNumID();
if(data.isResponding)
{
point << " isResponding=" << std::boolalpha << true;
point << ",indirectWorkListSize=" << data.indirectWorkListSize;
point << ",directWorkListSize=" << data.directWorkListSize;
point << ",hostnameid=\"" << data.hostnameid << "\"";
}
else
{
point << " isResponding=" << std::boolalpha << false;
}
appendPoint(point.str());
}
void InfluxDB::insertStorageNodeData(std::shared_ptr<Node> node,
const StorageNodeDataContent& data)
{
std::ostringstream point;
point << "storage";
point << ",nodeID=" << escapeStringForWrite(node->getAlias());
point << ",nodeNumID=" << node->getNumID();
if(data.isResponding)
{
point << " isResponding=" << std::boolalpha << true;
point << ",indirectWorkListSize=" << data.indirectWorkListSize;
point << ",directWorkListSize=" << data.directWorkListSize;
point << ",diskSpaceTotal=" << data.diskSpaceTotal;
point << ",diskSpaceFree=" << data.diskSpaceFree;
point << ",hostnameid=\"" << data.hostnameid << "\"";
}
else
{
point << " isResponding=" << std::boolalpha << false;
}
appendPoint(point.str());
}
void InfluxDB::insertHighResMetaNodeData(std::shared_ptr<Node> node,
const HighResolutionStats& data)
{
std::ostringstream point;
point << "highResMeta";
point << ",nodeID=" << escapeStringForWrite(node->getAlias());
point << ",nodeNumID=" << node->getNumID();
point << " workRequests=" << data.incVals.workRequests;
point << ",queuedRequests=" << data.rawVals.queuedRequests;
point << ",netSendBytes=" << data.incVals.netSendBytes;
point << ",netRecvBytes=" << data.incVals.netRecvBytes;
// timestamp in ns
point << " " << std::chrono::nanoseconds(
std::chrono::milliseconds(data.rawVals.statsTimeMS)).count();
appendPoint(point.str());
}
void InfluxDB::insertHighResStorageNodeData(std::shared_ptr<Node> node,
const HighResolutionStats& data)
{
std::ostringstream point;
point << "highResStorage";
point << ",nodeID=" << escapeStringForWrite(node->getAlias());
point << ",nodeNumID=" << node->getNumID();
point << " workRequests=" << data.incVals.workRequests;
point << ",queuedRequests=" << data.rawVals.queuedRequests;
point << ",diskWriteBytes=" << data.incVals.diskWriteBytes;
point << ",diskReadBytes=" << data.incVals.diskReadBytes;
point << ",netSendBytes=" << data.incVals.netSendBytes;
point << ",netRecvBytes=" << data.incVals.netRecvBytes;
// timestamp in ns
point << " " << std::chrono::nanoseconds(
std::chrono::milliseconds(data.rawVals.statsTimeMS)).count();
appendPoint(point.str());
}
void InfluxDB::insertStorageTargetsData(std::shared_ptr<Node> node,
const StorageTargetInfo& data)
{
std::ostringstream point;
point << "storageTargets";
point << ",nodeID=" << escapeStringForWrite(node->getAlias());
point << ",nodeNumID=" << node->getNumID();
point << ",storageTargetID=" << data.getTargetID();
point << " diskSpaceTotal=" << data.getDiskSpaceTotal();
point << ",diskSpaceFree=" << data.getDiskSpaceFree();
point << ",inodesTotal=" << data.getInodesTotal();
point << ",inodesFree=" << data.getInodesFree();
std::string t;
if (data.getState() == TargetConsistencyState::TargetConsistencyState_GOOD)
t = "GOOD";
else if (data.getState() == TargetConsistencyState::TargetConsistencyState_NEEDS_RESYNC)
t = "NEEDS_RESYNC";
else
t = "BAD";
point << ",targetConsistencyState=\"" << t << "\"";
appendPoint(point.str());
}
void InfluxDB::insertClientNodeData(const std::string& id, const NodeType nodeType,
const std::map<std::string, uint64_t>& opMap, bool perUser)
{
std::ostringstream point;
if (perUser)
{
if (nodeType == NODETYPE_Meta)
point << "metaClientOpsByUser";
else if (nodeType == NODETYPE_Storage)
point << "storageClientOpsByUser";
else
throw DatabaseException("Invalid Nodetype given.");
}
else
{
if (nodeType == NODETYPE_Meta)
point << "metaClientOpsByNode";
else if (nodeType == NODETYPE_Storage)
point << "storageClientOpsByNode";
else
throw DatabaseException("Invalid Nodetype given.");
}
point << (perUser ? ",user=" : ",node=") << id;
bool first = true;
for (auto iter = opMap.begin(); iter != opMap.end(); iter++)
{
if (iter->second == 0)
continue;
point << (first ? " " : ",") << iter->first << "=" << iter->second;
first = false;
}
// If all op counters are zero, don't write anything
if (!first)
appendPoint(point.str());
}
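// Buffer one line-protocol point (under pointsMutex); once maxPointsPerRequest points
// have accumulated, they are flushed in a single write request.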
void InfluxDB::appendPoint(const std::string& point)
{
const std::lock_guard<Mutex> mutexLock(pointsMutex);
points += point + "\n";
numPoints++;
// test also for size? make it an option?
if (numPoints >= config.maxPointsPerRequest)
{
writePointsUnlocked();
}
}
void InfluxDB::write()
{
const std::lock_guard<Mutex> mutexLock(pointsMutex);
writePointsUnlocked();
}
void InfluxDB::writePointsUnlocked()
{
sendWrite(points);
points.clear();
LOG(DATABASE, DEBUG, "Sent data to InfluxDB.", numPoints);
numPoints = 0;
}
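// POST the buffered points. InfluxDB v1 uses /write with a db parameter; v2 uses
// /api/v2/write with org/bucket parameters and a token authorization header.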
void InfluxDB::sendWrite(const std::string& data) const
{
unsigned short responseCode = 0;
CurlWrapper::ParameterMap params;
std::string url;
std::vector<std::string> headers;
if (config.dbVersion == INFLUXDB)
{
params["db"] = config.database;
url = config.host + ":" + StringTk::intToStr(config.port) + "/write";
}
else
{
params["org"] = config.organization;
params["bucket"] = config.bucket;
url = config.host + ":" + StringTk::intToStr(config.port) + "/api/v2/write";
headers.push_back("Authorization: Token " + config.token);
}
const std::lock_guard<Mutex> mutexLock(curlMutex);
try
{
responseCode = curlWrapper->sendPostRequest(url, data.c_str(), params, headers);
}
catch (const CurlException& e)
{
LOG(DATABASE, ERR, "Writing to InfluxDB failed due to Curl error.", ("Error", e.what()));
return;
}
if (responseCode < 200 || responseCode >= 300)
{
LOG(DATABASE, ERR, "Writing to InfluxDB failed.", responseCode,
("responseMessage", curlWrapper->getResponse()));
}
}
void InfluxDB::sendQuery(const std::string& data) const
{
unsigned short responseCode = 0;
CurlWrapper::ParameterMap params;
params["db"] = config.database;
params["q"] = data;
const std::lock_guard<Mutex> mutexLock(curlMutex);
try
{
responseCode = curlWrapper->sendPostRequest(config.host + ":"
+ StringTk::intToStr(config.port)
+ "/query", "", params, {});
}
catch (const CurlException& e)
{
LOG(DATABASE, ERR, "Querying InfluxDB failed due to Curl error.", ("Error", e.what()));
return;
}
if (responseCode < 200 || responseCode >= 300)
{
LOG(DATABASE, ERR, "Querying InfluxDB failed.", responseCode,
("responseMessage", curlWrapper->getResponse()));
}
}
bool InfluxDB::sendPing() const
{
unsigned short responseCode = 0;
const std::lock_guard<Mutex> mutexLock(curlMutex);
try
{
responseCode = curlWrapper->sendGetRequest(config.host + ":"
+ StringTk::intToStr(config.port) + "/ping", CurlWrapper::ParameterMap());
}
catch (const CurlException& e)
{
LOG(DATABASE, ERR, "Pinging InfluxDB failed due to Curl error.", ("Error", e.what()));
return false;
}
if (responseCode < 200 || responseCode >= 300)
{
LOG(DATABASE, ERR, "Pinging InfluxDB failed.", responseCode,
("responseMessage", curlWrapper->getResponse()));
return false;
}
return true;
}
/*
* According to InfluxDB documentation, spaces, "=" and "," need to be escaped for write.
*/
std::string InfluxDB::escapeStringForWrite(const std::string& str)
{
std::string result = str;
boost::replace_all(result, " ", "\\ ");
boost::replace_all(result, "=", "\\=");
boost::replace_all(result, ",", "\\,");
return result;
}


@@ -0,0 +1,84 @@
#ifndef INFLUXDB_H_
#define INFLUXDB_H_
#include <common/nodes/NodeType.h>
#include <common/threading/Mutex.h>
#include <nodes/MetaNodeEx.h>
#include <nodes/StorageNodeEx.h>
#include <misc/CurlWrapper.h>
#include <misc/TSDatabase.h>
#include <app/Config.h>
enum InfluxDBVersion
{
INFLUXDB,
INFLUXDB2,
};
class App;
class InfluxDB : public TSDatabase
{
public:
struct Config
{
std::string host;
int port;
std::string database;
std::chrono::milliseconds httpTimeout;
unsigned maxPointsPerRequest;
bool setRetentionPolicy;
std::string retentionDuration;
bool curlCheckSSLCertificates;
std::string username;
std::string password;
std::string bucket;
std::string organization;
std::string token;
InfluxDBVersion dbVersion;
};
InfluxDB(Config cfg);
virtual ~InfluxDB() {};
virtual void insertMetaNodeData(
std::shared_ptr<Node> node, const MetaNodeDataContent& data) override;
virtual void insertStorageNodeData(
std::shared_ptr<Node> node, const StorageNodeDataContent& data) override;
virtual void insertHighResMetaNodeData(
std::shared_ptr<Node> node, const HighResolutionStats& data) override;
virtual void insertHighResStorageNodeData(
std::shared_ptr<Node> node, const HighResolutionStats& data) override;
virtual void insertStorageTargetsData(
std::shared_ptr<Node> node, const StorageTargetInfo& data) override;
virtual void insertClientNodeData(
const std::string& id, const NodeType nodeType,
const std::map<std::string, uint64_t>& opMap, bool perUser) override;
virtual void write() override;
static std::string escapeStringForWrite(const std::string& str);
private:
const Config config;
std::unique_ptr<CurlWrapper> curlWrapper;
std::string points;
unsigned numPoints = 0;
mutable Mutex pointsMutex;
mutable Mutex curlMutex;
void setupDatabase() const;
void appendPoint(const std::string& point);
void writePointsUnlocked();
void sendWrite(const std::string& data) const;
void sendQuery(const std::string& data) const;
bool sendPing() const;
};
#endif
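
A minimal construction sketch against the v1 endpoint; every value shown is a placeholder (the v2 path additionally needs organization, bucket, and token):

InfluxDB::Config cfg;
cfg.host = "http://localhost";                       // placeholder
cfg.port = 8086;                                     // placeholder
cfg.database = "beegfs_mon";                         // placeholder
cfg.httpTimeout = std::chrono::milliseconds(1000);   // placeholder
cfg.maxPointsPerRequest = 5000;                      // placeholder flush threshold
cfg.setRetentionPolicy = true;
cfg.retentionDuration = "1d";                        // placeholder InfluxDB duration literal
cfg.curlCheckSSLCertificates = true;
cfg.dbVersion = INFLUXDB;                            // v1 API; INFLUXDB2 switches to /api/v2/write with a token header
InfluxDB db(std::move(cfg));                         // pings the server, creates database and retention policy
db.write();                                          // flushes any buffered line-protocol points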


@@ -0,0 +1,34 @@
#ifndef TS_DATABASE_H_
#define TS_DATABASE_H_
#include <common/nodes/NodeType.h>
#include <nodes/MetaNodeEx.h>
#include <nodes/StorageNodeEx.h>
#include <app/Config.h>
class TSDatabase
{
public:
static const unsigned connectionRetries = 3;
TSDatabase() {};
virtual ~TSDatabase() {};
virtual void insertMetaNodeData(
std::shared_ptr<Node> node, const MetaNodeDataContent& data) = 0;
virtual void insertStorageNodeData(
std::shared_ptr<Node> node, const StorageNodeDataContent& data) = 0;
virtual void insertHighResMetaNodeData(
std::shared_ptr<Node> node, const HighResolutionStats& data) = 0;
virtual void insertHighResStorageNodeData(
std::shared_ptr<Node> node, const HighResolutionStats& data) = 0;
virtual void insertStorageTargetsData(
std::shared_ptr<Node> node, const StorageTargetInfo& data) = 0;
virtual void insertClientNodeData(
const std::string& id, const NodeType nodeType,
const std::map<std::string, uint64_t>& opMap, bool perUser) = 0;
virtual void write() = 0;
};
#endif