mirror of
https://github.com/Icinga/icinga2.git
synced 2026-04-11 11:56:29 -04:00
Merge 267675e80b into a810d6409b
This commit is contained in:
commit
f3340ea68e
12 changed files with 1104 additions and 647 deletions
|
|
@ -136,16 +136,13 @@ void CheckerComponent::CheckThreadProc()
|
|||
|
||||
bool forced = checkable->GetForceNextCheck();
|
||||
bool check = true;
|
||||
bool notifyNextCheck = false;
|
||||
double nextCheck = -1;
|
||||
|
||||
if (!forced) {
|
||||
if (!checkable->IsReachable(DependencyCheckExecution)) {
|
||||
Log(LogNotice, "CheckerComponent")
|
||||
<< "Skipping check for object '" << checkable->GetName() << "': Dependency failed.";
|
||||
|
||||
check = false;
|
||||
notifyNextCheck = true;
|
||||
}
|
||||
|
||||
Host::Ptr host;
|
||||
|
|
@ -182,7 +179,6 @@ void CheckerComponent::CheckThreadProc()
|
|||
<< Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", nextCheck);
|
||||
|
||||
check = false;
|
||||
notifyNextCheck = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -201,11 +197,6 @@ void CheckerComponent::CheckThreadProc()
|
|||
checkable->UpdateNextCheck();
|
||||
}
|
||||
|
||||
if (notifyNextCheck) {
|
||||
// Trigger update event for Icinga DB
|
||||
Checkable::OnNextCheckUpdated(checkable);
|
||||
}
|
||||
|
||||
lock.lock();
|
||||
|
||||
continue;
|
||||
|
|
|
|||
|
|
@ -144,6 +144,22 @@ void DependencyGroup::LoadParents(std::set<Checkable::Ptr>& parents) const
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve any child Checkable from the current dependency group.
|
||||
*
|
||||
* @return - Returns the first child Checkable found in this group, or nullptr if there are no children.
|
||||
*/
|
||||
Checkable::Ptr DependencyGroup::GetAnyChild() const
|
||||
{
|
||||
std::lock_guard lock(m_Mutex);
|
||||
for (auto& [_, children] : m_Members) {
|
||||
if (!children.empty()) {
|
||||
return children.begin()->second->GetChild();
|
||||
}
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve the number of dependency objects in the current dependency group.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -164,6 +164,7 @@ public:
|
|||
void RemoveDependency(const Dependency::Ptr& dependency);
|
||||
std::vector<Dependency::Ptr> GetDependenciesForChild(const Checkable* child) const;
|
||||
void LoadParents(std::set<Checkable::Ptr>& parents) const;
|
||||
Checkable::Ptr GetAnyChild() const;
|
||||
size_t GetDependenciesCount() const;
|
||||
|
||||
void SetIcingaDBIdentifier(const String& identifier);
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ mkclass_target(icingadb.ti icingadb-ti.cpp icingadb-ti.hpp)
|
|||
mkembedconfig_target(icingadb-itl.conf icingadb-itl.cpp)
|
||||
|
||||
set(icingadb_SOURCES
|
||||
icingadb.cpp icingadb-objects.cpp icingadb-stats.cpp icingadb-utility.cpp redisconnection.cpp icingadb-ti.hpp
|
||||
icingadb.cpp icingadb-objects.cpp icingadb-stats.cpp icingadb-utility.cpp icingadb-worker.cpp redisconnection.cpp icingadb-ti.hpp
|
||||
icingadbchecktask.cpp icingadb-itl.cpp
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -42,8 +42,6 @@
|
|||
|
||||
using namespace icinga;
|
||||
|
||||
using Prio = RedisConnection::QueryPriority;
|
||||
|
||||
INITIALIZE_ONCE(&IcingaDB::ConfigStaticInitialize);
|
||||
|
||||
// A list of all types for which we want to sync custom variables, along with their corresponding Redis key.
|
||||
|
|
@ -167,8 +165,8 @@ void IcingaDB::ConfigStaticInitialize()
|
|||
IcingaDB::NewCheckResultHandler(checkable);
|
||||
});
|
||||
|
||||
Checkable::OnNextCheckUpdated.connect([](const Checkable::Ptr& checkable) {
|
||||
IcingaDB::NextCheckUpdatedHandler(checkable);
|
||||
Checkable::OnNextCheckChanged.connect([](const Checkable::Ptr& checkable, const Value&) {
|
||||
IcingaDB::NextCheckChangedHandler(checkable);
|
||||
});
|
||||
|
||||
Service::OnHostProblemChanged.connect([](const Service::Ptr& service, const CheckResult::Ptr&, const MessageOrigin::Ptr&) {
|
||||
|
|
@ -212,8 +210,9 @@ void IcingaDB::ConfigStaticInitialize()
|
|||
|
||||
void IcingaDB::UpdateAllConfigObjects()
|
||||
{
|
||||
m_Rcon->Sync();
|
||||
m_Rcon->FireAndForgetQuery({"XADD", "icinga:schema", "MAXLEN", "1", "*", "version", "6"}, Prio::Heartbeat);
|
||||
// This function performs an initial dump of all configuration objects into Redis, thus there are no
|
||||
// previously enqueued queries on m_RconWorker that we need to wait for. So, no Sync() call is necessary here.
|
||||
m_RconWorker->FireAndForgetQuery({"XADD", "icinga:schema", "MAXLEN", "1", "*", "version", "6"}, {}, true);
|
||||
|
||||
Log(LogInformation, "IcingaDB") << "Starting initial config/status dump";
|
||||
double startTime = Utility::GetTime();
|
||||
|
|
@ -228,16 +227,8 @@ void IcingaDB::UpdateAllConfigObjects()
|
|||
WorkQueue upq(25000, Configuration::Concurrency, LogNotice);
|
||||
upq.SetName("IcingaDB:ConfigDump");
|
||||
|
||||
m_Rcon->SuppressQueryKind(Prio::CheckResult);
|
||||
m_Rcon->SuppressQueryKind(Prio::RuntimeStateSync);
|
||||
|
||||
Defer unSuppress ([this]() {
|
||||
m_Rcon->UnsuppressQueryKind(Prio::RuntimeStateSync);
|
||||
m_Rcon->UnsuppressQueryKind(Prio::CheckResult);
|
||||
});
|
||||
|
||||
// Add a new type=* state=wip entry to the stream and remove all previous entries (MAXLEN 1).
|
||||
m_Rcon->FireAndForgetQuery({"XADD", "icinga:dump", "MAXLEN", "1", "*", "key", "*", "state", "wip"}, Prio::Config);
|
||||
m_RconWorker->FireAndForgetQuery({"XADD", "icinga:dump", "MAXLEN", "1", "*", "key", "*", "state", "wip"});
|
||||
|
||||
const std::vector<RedisConnection::QueryArg> globalKeys = {
|
||||
CONFIG_REDIS_KEY_PREFIX "customvar",
|
||||
|
|
@ -254,9 +245,9 @@ void IcingaDB::UpdateAllConfigObjects()
|
|||
CONFIG_REDIS_KEY_PREFIX "redundancygroup",
|
||||
CONFIG_REDIS_KEY_PREFIX "redundancygroup:state",
|
||||
};
|
||||
DeleteKeys(m_Rcon, globalKeys, Prio::Config);
|
||||
DeleteKeys(m_Rcon, {"icinga:nextupdate:host", "icinga:nextupdate:service"}, Prio::Config);
|
||||
m_Rcon->Sync();
|
||||
DeleteKeys(m_RconWorker, globalKeys);
|
||||
DeleteKeys(m_RconWorker, {"icinga:nextupdate:host", "icinga:nextupdate:service"});
|
||||
m_RconWorker->Sync();
|
||||
|
||||
Defer resetDumpedGlobals ([this]() {
|
||||
m_DumpedGlobals.CustomVar.Reset();
|
||||
|
|
@ -277,7 +268,7 @@ void IcingaDB::UpdateAllConfigObjects()
|
|||
|
||||
auto& rcon (m_Rcons.at(ctype));
|
||||
|
||||
DeleteKeys(rcon, GetTypeOverwriteKeys(type), Prio::Config);
|
||||
DeleteKeys(rcon, GetTypeOverwriteKeys(type));
|
||||
|
||||
WorkQueue upqObjectType(25000, Configuration::Concurrency, LogNotice);
|
||||
upqObjectType.SetName("IcingaDB:ConfigDump:" + type->GetName().ToLower());
|
||||
|
|
@ -288,9 +279,7 @@ void IcingaDB::UpdateAllConfigObjects()
|
|||
String cursor = "0";
|
||||
|
||||
do {
|
||||
Array::Ptr res = rcon->GetResultOfQuery({
|
||||
"HSCAN", redisKeyPair.ChecksumKey, cursor, "COUNT", "1000"
|
||||
}, Prio::Config);
|
||||
Array::Ptr res = rcon->GetResultOfQuery({"HSCAN", redisKeyPair.ChecksumKey, cursor, "COUNT", "1000"});
|
||||
|
||||
AddKvsToMap(res->Get(1), redisCheckSums);
|
||||
|
||||
|
|
@ -306,7 +295,6 @@ void IcingaDB::UpdateAllConfigObjects()
|
|||
|
||||
upqObjectType.ParallelFor(objectChunks, [&](decltype(objectChunks)::const_reference chunk) {
|
||||
std::map<RedisConnection::QueryArg, RedisConnection::Query> hMSets;
|
||||
RedisConnection::Query hostZAdds = {"ZADD", "icinga:nextupdate:host"}, serviceZAdds = {"ZADD", "icinga:nextupdate:service"};
|
||||
|
||||
auto skimObjects ([&]() {
|
||||
std::lock_guard<std::mutex> l (ourContentMutex);
|
||||
|
|
@ -366,18 +354,7 @@ void IcingaDB::UpdateAllConfigObjects()
|
|||
auto checkable (dynamic_pointer_cast<Checkable>(object));
|
||||
|
||||
if (checkable && checkable->GetEnableActiveChecks()) {
|
||||
auto zAdds (dynamic_pointer_cast<Service>(checkable) ? &serviceZAdds : &hostZAdds);
|
||||
|
||||
zAdds->emplace_back(Convert::ToString(checkable->GetNextUpdate()));
|
||||
zAdds->emplace_back(GetObjectIdentifier(checkable));
|
||||
|
||||
if (zAdds->size() >= 102u) {
|
||||
RedisConnection::Query header (zAdds->begin(), zAdds->begin() + 2u);
|
||||
|
||||
rcon->FireAndForgetQuery(std::move(*zAdds), Prio::CheckResult);
|
||||
|
||||
*zAdds = std::move(header);
|
||||
}
|
||||
EnqueueConfigObject(checkable, icingadb::task_queue::NextUpdate);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -385,12 +362,6 @@ void IcingaDB::UpdateAllConfigObjects()
|
|||
|
||||
ExecuteRedisTransaction(rcon, hMSets, {});
|
||||
|
||||
for (auto zAdds : {&hostZAdds, &serviceZAdds}) {
|
||||
if (zAdds->size() > 2u) {
|
||||
rcon->FireAndForgetQuery(std::move(*zAdds), Prio::CheckResult);
|
||||
}
|
||||
}
|
||||
|
||||
Log(LogNotice, "IcingaDB")
|
||||
<< "Dumped " << bulkCounter << " objects of " << type->ToString();
|
||||
});
|
||||
|
|
@ -451,7 +422,7 @@ void IcingaDB::UpdateAllConfigObjects()
|
|||
setChecksum.clear();
|
||||
setObject.clear();
|
||||
|
||||
rcon->FireAndForgetQueries(std::move(transaction), Prio::Config, {affectedConfig});
|
||||
rcon->FireAndForgetQueries(std::move(transaction), {affectedConfig});
|
||||
});
|
||||
|
||||
auto flushDels ([&]() {
|
||||
|
|
@ -470,7 +441,7 @@ void IcingaDB::UpdateAllConfigObjects()
|
|||
delChecksum.clear();
|
||||
delObject.clear();
|
||||
|
||||
rcon->FireAndForgetQueries(std::move(transaction), Prio::Config, {affectedConfig});
|
||||
rcon->FireAndForgetQueries(std::move(transaction), {affectedConfig});
|
||||
});
|
||||
|
||||
auto setOne ([&]() {
|
||||
|
|
@ -533,7 +504,7 @@ void IcingaDB::UpdateAllConfigObjects()
|
|||
auto keys = GetTypeOverwriteKeys(type, true);
|
||||
keys.emplace_back(redisKeyPair.ObjectKey);
|
||||
for (auto& key : keys) {
|
||||
rcon->FireAndForgetQuery({"XADD", "icinga:dump", "*", "key", key, "state", "done"}, Prio::Config);
|
||||
rcon->FireAndForgetQuery({"XADD", "icinga:dump", "*", "key", key, "state", "done"});
|
||||
}
|
||||
rcon->Sync();
|
||||
});
|
||||
|
|
@ -554,14 +525,14 @@ void IcingaDB::UpdateAllConfigObjects()
|
|||
}
|
||||
|
||||
for (auto& key : globalKeys) {
|
||||
m_Rcon->FireAndForgetQuery({"XADD", "icinga:dump", "*", "key", key, "state", "done"}, Prio::Config);
|
||||
m_RconWorker->FireAndForgetQuery({"XADD", "icinga:dump", "*", "key", key, "state", "done"});
|
||||
}
|
||||
|
||||
m_Rcon->FireAndForgetQuery({"XADD", "icinga:dump", "*", "key", "*", "state", "done"}, Prio::Config);
|
||||
m_RconWorker->FireAndForgetQuery({"XADD", "icinga:dump", "*", "key", "*", "state", "done"});
|
||||
|
||||
// enqueue a callback that will notify us once all previous queries were executed and wait for this event
|
||||
std::promise<void> p;
|
||||
m_Rcon->EnqueueCallback([&p](boost::asio::yield_context&) { p.set_value(); }, Prio::Config);
|
||||
m_RconWorker->EnqueueCallback([&p](boost::asio::yield_context&) { p.set_value(); });
|
||||
p.get_future().wait();
|
||||
|
||||
auto endTime (Utility::GetTime());
|
||||
|
|
@ -594,13 +565,13 @@ std::vector<std::vector<intrusive_ptr<ConfigObject>>> IcingaDB::ChunkObjects(std
|
|||
return chunks;
|
||||
}
|
||||
|
||||
void IcingaDB::DeleteKeys(const RedisConnection::Ptr& conn, const std::vector<RedisConnection::QueryArg>& keys, RedisConnection::QueryPriority priority) {
|
||||
void IcingaDB::DeleteKeys(const RedisConnection::Ptr& conn, const std::vector<RedisConnection::QueryArg>& keys) {
|
||||
RedisConnection::Query query = {"DEL"};
|
||||
for (auto& key : keys) {
|
||||
query.emplace_back(key);
|
||||
}
|
||||
|
||||
conn->FireAndForgetQuery(std::move(query), priority);
|
||||
conn->FireAndForgetQuery(std::move(query));
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -1341,21 +1312,21 @@ void IcingaDB::InsertCheckableDependencies(
|
|||
* Update the state information of a checkable in Redis.
|
||||
*
|
||||
* What is updated exactly depends on the mode parameter:
|
||||
* - Volatile: Update the volatile state information stored in icinga:host:state or icinga:service:state as well as
|
||||
* the corresponding checksum stored in icinga:checksum:host:state or icinga:checksum:service:state.
|
||||
* - RuntimeOnly: Write a runtime update to the icinga:runtime:state stream. It is up to the caller to ensure that
|
||||
* - VolatileState: Update the volatile state information stored in icinga:host:state or icinga:service:state as well
|
||||
* as the corresponding checksum stored in icinga:checksum:host:state or icinga:checksum:service:state.
|
||||
* - RuntimeState: Write a runtime update to the icinga:runtime:state stream. It is up to the caller to ensure that
|
||||
* identical volatile state information was already written before to avoid inconsistencies. This mode is only
|
||||
* useful to upgrade a previous Volatile to a Full operation, otherwise Full should be used.
|
||||
* - Full: Perform an update of all state information in Redis, that is updating the volatile information and sending
|
||||
* a corresponding runtime update so that this state update gets written through to the persistent database by a
|
||||
* running icingadb process.
|
||||
* - FullState: Perform an update of all state information in Redis, that is updating the volatile information and
|
||||
* sending a corresponding runtime update so that this state update gets written through to the persistent database
|
||||
* by a running icingadb process.
|
||||
*
|
||||
* @param checkable State of this checkable is updated in Redis
|
||||
* @param mode Mode of operation (StateUpdate::Volatile, StateUpdate::RuntimeOnly, or StateUpdate::Full)
|
||||
* @param mode Mode of operation (DirtyBits:VolatileState, DirtyBits::RuntimeState, or DirtyBits::FullState)
|
||||
*/
|
||||
void IcingaDB::UpdateState(const Checkable::Ptr& checkable, StateUpdate mode)
|
||||
void IcingaDB::UpdateState(const Checkable::Ptr& checkable, uint32_t mode)
|
||||
{
|
||||
if (!m_Rcon || !m_Rcon->IsConnected())
|
||||
if (!m_RconWorker || !m_RconWorker->IsConnected())
|
||||
return;
|
||||
|
||||
Dictionary::Ptr stateAttrs = SerializeState(checkable);
|
||||
|
|
@ -1363,15 +1334,15 @@ void IcingaDB::UpdateState(const Checkable::Ptr& checkable, StateUpdate mode)
|
|||
String checksum = HashValue(stateAttrs);
|
||||
|
||||
auto [redisStateKey, redisChecksumKey] = GetCheckableStateKeys(checkable->GetReflectionType());
|
||||
if (mode & StateUpdate::Volatile) {
|
||||
if (mode & icingadb::task_queue::VolatileState) {
|
||||
String objectKey = GetObjectIdentifier(checkable);
|
||||
m_Rcon->FireAndForgetQueries({
|
||||
m_RconWorker->FireAndForgetQueries({
|
||||
{"HSET", redisStateKey, objectKey, JsonEncode(stateAttrs)},
|
||||
{"HSET", redisChecksumKey, objectKey, JsonEncode(new Dictionary({{"checksum", checksum}}))},
|
||||
}, Prio::RuntimeStateSync);
|
||||
});
|
||||
}
|
||||
|
||||
if (mode & StateUpdate::RuntimeOnly) {
|
||||
if (mode & icingadb::task_queue::RuntimeState) {
|
||||
ObjectLock olock(stateAttrs);
|
||||
|
||||
RedisConnection::Query streamadd({
|
||||
|
|
@ -1386,35 +1357,23 @@ void IcingaDB::UpdateState(const Checkable::Ptr& checkable, StateUpdate mode)
|
|||
streamadd.emplace_back(IcingaToStreamValue(kv.second));
|
||||
}
|
||||
|
||||
m_Rcon->FireAndForgetQuery(std::move(streamadd), Prio::RuntimeStateStream, {0, 1});
|
||||
m_RconWorker->FireAndForgetQuery(std::move(streamadd), {0, 1});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Send dependencies state information of the given Checkable to Redis.
|
||||
* Update the dependency state information of the given checkable and its associated dependency groups in Redis.
|
||||
*
|
||||
* If the dependencyGroup parameter is set, only the dependencies state of that group are sent. Otherwise, all
|
||||
* dependency groups of the provided Checkable are processed.
|
||||
* This function serializes the dependency state information of the provided Checkable object and its associated
|
||||
* DependencyGroup into Redis HMSETs and streams the state updates to the runtime state stream. It's intended to
|
||||
* be called by the background worker when processing runtime updates for Checkable objects that are part of some
|
||||
* dependency graph.
|
||||
*
|
||||
* @param checkable The Checkable you want to send the dependencies state update for
|
||||
* @param onlyDependencyGroup If set, send state updates only for this dependency group and its dependencies.
|
||||
* @param seenGroups A container to track already processed DependencyGroups to avoid duplicate state updates.
|
||||
* @param dependencyGroup The dependency group to process for the given checkable.
|
||||
*/
|
||||
void IcingaDB::UpdateDependenciesState(const Checkable::Ptr& checkable, const DependencyGroup::Ptr& onlyDependencyGroup,
|
||||
std::set<DependencyGroup*>* seenGroups) const
|
||||
void IcingaDB::UpdateDependenciesState(const Checkable::Ptr& checkable, const DependencyGroup::Ptr& dependencyGroup) const
|
||||
{
|
||||
if (!m_Rcon || !m_Rcon->IsConnected()) {
|
||||
return;
|
||||
}
|
||||
|
||||
std::vector<DependencyGroup::Ptr> dependencyGroups{onlyDependencyGroup};
|
||||
if (!onlyDependencyGroup) {
|
||||
dependencyGroups = checkable->GetDependencyGroups();
|
||||
if (dependencyGroups.empty()) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
RedisConnection::Queries streamStates;
|
||||
auto addDependencyStateToStream([&streamStates](std::string_view redisKey, const Dictionary::Ptr& stateAttrs) {
|
||||
RedisConnection::Query xAdd{
|
||||
|
|
@ -1430,61 +1389,39 @@ void IcingaDB::UpdateDependenciesState(const Checkable::Ptr& checkable, const De
|
|||
});
|
||||
|
||||
std::map<RedisConnection::QueryArg, RedisConnection::Query> hMSets;
|
||||
for (auto& dependencyGroup : dependencyGroups) {
|
||||
bool isRedundancyGroup(dependencyGroup->IsRedundancyGroup());
|
||||
if (isRedundancyGroup && dependencyGroup->GetIcingaDBIdentifier().IsEmpty()) {
|
||||
// Way too soon! The Icinga DB hash will be set during the initial config dump, but this state
|
||||
// update seems to occur way too early. So, we've to skip it for now and wait for the next one.
|
||||
// The m_ConfigDumpInProgress flag is probably still set to true at this point!
|
||||
continue;
|
||||
}
|
||||
auto dependencies(dependencyGroup->GetDependenciesForChild(checkable.get()));
|
||||
std::sort(dependencies.begin(), dependencies.end(), [](const Dependency::Ptr& lhs, const Dependency::Ptr& rhs) {
|
||||
return lhs->GetParent() < rhs->GetParent();
|
||||
});
|
||||
for (auto it(dependencies.begin()); it != dependencies.end(); /* no increment */) {
|
||||
const auto& dependency(*it);
|
||||
|
||||
if (seenGroups && !seenGroups->insert(dependencyGroup.get()).second) {
|
||||
// Usually, if the seenGroups set is provided, IcingaDB is triggering a runtime state update for ALL
|
||||
// children of a given initiator Checkable (parent). In such cases, we may end up with lots of useless
|
||||
// state updates as all the children of a non-redundant group a) share the same entry in the database b)
|
||||
// it doesn't matter which child triggers the state update first all the subsequent updates are just useless.
|
||||
//
|
||||
// Likewise, for redundancy groups, all children of a redundancy group share the same set of parents
|
||||
// and thus the resulting state information would be the same from each child Checkable perspective.
|
||||
// So, serializing the redundancy group state information only once is sufficient.
|
||||
continue;
|
||||
}
|
||||
|
||||
auto dependencies(dependencyGroup->GetDependenciesForChild(checkable.get()));
|
||||
std::sort(dependencies.begin(), dependencies.end(), [](const Dependency::Ptr& lhs, const Dependency::Ptr& rhs) {
|
||||
return lhs->GetParent() < rhs->GetParent();
|
||||
});
|
||||
for (auto it(dependencies.begin()); it != dependencies.end(); /* no increment */) {
|
||||
const auto& dependency(*it);
|
||||
|
||||
Dictionary::Ptr stateAttrs;
|
||||
// Note: The following loop is intended to cover some possible special cases but may not occur in practice
|
||||
// that often. That is, having two or more dependency objects that point to the same parent Checkable.
|
||||
// So, traverse all those duplicates and merge their relevant state information into a single edge.
|
||||
for (; it != dependencies.end() && (*it)->GetParent() == dependency->GetParent(); ++it) {
|
||||
if (!stateAttrs || stateAttrs->Get("failed") == false) {
|
||||
stateAttrs = SerializeDependencyEdgeState(dependencyGroup, *it);
|
||||
}
|
||||
Dictionary::Ptr stateAttrs;
|
||||
// Note: The following loop is intended to cover some possible special cases but may not occur in practice
|
||||
// that often. That is, having two or more dependency objects that point to the same parent Checkable.
|
||||
// So, traverse all those duplicates and merge their relevant state information into a single edge.
|
||||
for (; it != dependencies.end() && (*it)->GetParent() == dependency->GetParent(); ++it) {
|
||||
if (!stateAttrs || stateAttrs->Get("failed") == false) {
|
||||
stateAttrs = SerializeDependencyEdgeState(dependencyGroup, *it);
|
||||
}
|
||||
|
||||
addDependencyStateToStream(CONFIG_REDIS_KEY_PREFIX "dependency:edge:state", stateAttrs);
|
||||
AddDataToHmSets(hMSets, CONFIG_REDIS_KEY_PREFIX "dependency:edge:state", stateAttrs->Get("id"), stateAttrs);
|
||||
}
|
||||
|
||||
if (isRedundancyGroup) {
|
||||
Dictionary::Ptr stateAttrs(SerializeRedundancyGroupState(checkable, dependencyGroup));
|
||||
addDependencyStateToStream(CONFIG_REDIS_KEY_PREFIX "dependency:edge:state", stateAttrs);
|
||||
AddDataToHmSets(hMSets, CONFIG_REDIS_KEY_PREFIX "dependency:edge:state", stateAttrs->Get("id"), stateAttrs);
|
||||
}
|
||||
|
||||
Dictionary::Ptr sharedGroupState(stateAttrs->ShallowClone());
|
||||
sharedGroupState->Remove("redundancy_group_id");
|
||||
sharedGroupState->Remove("is_reachable");
|
||||
sharedGroupState->Remove("last_state_change");
|
||||
if (dependencyGroup->IsRedundancyGroup()) {
|
||||
Dictionary::Ptr stateAttrs(SerializeRedundancyGroupState(checkable, dependencyGroup));
|
||||
|
||||
addDependencyStateToStream(CONFIG_REDIS_KEY_PREFIX "redundancygroup:state", stateAttrs);
|
||||
addDependencyStateToStream(CONFIG_REDIS_KEY_PREFIX "dependency:edge:state", sharedGroupState);
|
||||
AddDataToHmSets(hMSets, CONFIG_REDIS_KEY_PREFIX "redundancygroup:state", dependencyGroup->GetIcingaDBIdentifier(), stateAttrs);
|
||||
AddDataToHmSets(hMSets, CONFIG_REDIS_KEY_PREFIX "dependency:edge:state", dependencyGroup->GetIcingaDBIdentifier(), sharedGroupState);
|
||||
}
|
||||
Dictionary::Ptr sharedGroupState(stateAttrs->ShallowClone());
|
||||
sharedGroupState->Remove("redundancy_group_id");
|
||||
sharedGroupState->Remove("is_reachable");
|
||||
sharedGroupState->Remove("last_state_change");
|
||||
|
||||
addDependencyStateToStream(CONFIG_REDIS_KEY_PREFIX "redundancygroup:state", stateAttrs);
|
||||
addDependencyStateToStream(CONFIG_REDIS_KEY_PREFIX "dependency:edge:state", sharedGroupState);
|
||||
AddDataToHmSets(hMSets, CONFIG_REDIS_KEY_PREFIX "redundancygroup:state", dependencyGroup->GetIcingaDBIdentifier(), stateAttrs);
|
||||
AddDataToHmSets(hMSets, CONFIG_REDIS_KEY_PREFIX "dependency:edge:state", dependencyGroup->GetIcingaDBIdentifier(), sharedGroupState);
|
||||
}
|
||||
|
||||
if (!streamStates.empty()) {
|
||||
|
|
@ -1494,30 +1431,8 @@ void IcingaDB::UpdateDependenciesState(const Checkable::Ptr& checkable, const De
|
|||
queries.emplace_back(std::move(query));
|
||||
}
|
||||
|
||||
m_Rcon->FireAndForgetQueries(std::move(queries), Prio::RuntimeStateSync);
|
||||
m_Rcon->FireAndForgetQueries(std::move(streamStates), Prio::RuntimeStateStream, {0, 1});
|
||||
}
|
||||
}
|
||||
|
||||
// Used to update a single object, used for runtime updates
|
||||
void IcingaDB::SendConfigUpdate(const ConfigObject::Ptr& object, bool runtimeUpdate)
|
||||
{
|
||||
if (!m_Rcon || !m_Rcon->IsConnected())
|
||||
return;
|
||||
|
||||
std::map<RedisConnection::QueryArg, RedisConnection::Query> hMSets;
|
||||
std::vector<Dictionary::Ptr> runtimeUpdates;
|
||||
|
||||
CreateConfigUpdate(object, GetSyncableTypeRedisKeys(object->GetReflectionType()), hMSets, runtimeUpdates, runtimeUpdate);
|
||||
Checkable::Ptr checkable = dynamic_pointer_cast<Checkable>(object);
|
||||
if (checkable) {
|
||||
UpdateState(checkable, runtimeUpdate ? StateUpdate::Full : StateUpdate::Volatile);
|
||||
}
|
||||
|
||||
ExecuteRedisTransaction(m_Rcon, hMSets, runtimeUpdates);
|
||||
|
||||
if (checkable) {
|
||||
SendNextUpdate(checkable);
|
||||
m_RconWorker->FireAndForgetQueries(std::move(queries));
|
||||
m_RconWorker->FireAndForgetQueries(std::move(streamStates), {0, 1});
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1853,7 +1768,7 @@ IcingaDB::CreateConfigUpdate(const ConfigObject::Ptr& object, const QueryArgPair
|
|||
return;
|
||||
*/
|
||||
|
||||
if (m_Rcon == nullptr)
|
||||
if (m_RconWorker == nullptr)
|
||||
return;
|
||||
|
||||
Dictionary::Ptr attr = new Dictionary;
|
||||
|
|
@ -1883,29 +1798,14 @@ IcingaDB::CreateConfigUpdate(const ConfigObject::Ptr& object, const QueryArgPair
|
|||
|
||||
void IcingaDB::SendConfigDelete(const ConfigObject::Ptr& object)
|
||||
{
|
||||
if (!m_Rcon || !m_Rcon->IsConnected())
|
||||
if (!m_RconWorker || !m_RconWorker->IsConnected())
|
||||
return;
|
||||
|
||||
Type::Ptr type = object->GetReflectionType();
|
||||
String objectKey = GetObjectIdentifier(object);
|
||||
auto redisKeyPair = GetSyncableTypeRedisKeys(type);
|
||||
|
||||
m_Rcon->FireAndForgetQueries({
|
||||
{"HDEL", redisKeyPair.ObjectKey, objectKey},
|
||||
{"HDEL", redisKeyPair.ChecksumKey, objectKey},
|
||||
{
|
||||
"XADD", "icinga:runtime", "MAXLEN", "~", "1000000", "*",
|
||||
"redis_key", redisKeyPair.ObjectKey, "id", objectKey, "runtime_type", "delete"
|
||||
}
|
||||
}, Prio::Config);
|
||||
|
||||
CustomVarObject::Ptr customVarObject = dynamic_pointer_cast<CustomVarObject>(object);
|
||||
|
||||
if (customVarObject) {
|
||||
Dictionary::Ptr vars = customVarObject->GetVars();
|
||||
SendCustomVarsChanged(object, vars, nullptr);
|
||||
if (auto customVarObject = dynamic_pointer_cast<CustomVarObject>(object); customVarObject) {
|
||||
SendCustomVarsChanged(object, customVarObject->GetVars(), nullptr);
|
||||
}
|
||||
|
||||
Type::Ptr type = object->GetReflectionType();
|
||||
if (type == Host::TypeInstance || type == Service::TypeInstance) {
|
||||
Checkable::Ptr checkable = static_pointer_cast<Checkable>(object);
|
||||
|
||||
|
|
@ -1913,17 +1813,9 @@ void IcingaDB::SendConfigDelete(const ConfigObject::Ptr& object)
|
|||
Service::Ptr service;
|
||||
tie(host, service) = GetHostService(checkable);
|
||||
|
||||
m_Rcon->FireAndForgetQuery({
|
||||
"ZREM",
|
||||
service ? "icinga:nextupdate:service" : "icinga:nextupdate:host",
|
||||
GetObjectIdentifier(checkable)
|
||||
}, Prio::CheckResult);
|
||||
|
||||
auto [configStateKey, checksumStateKey] = GetCheckableStateKeys(checkable->GetReflectionType());
|
||||
m_Rcon->FireAndForgetQueries({
|
||||
{"HDEL", configStateKey, objectKey},
|
||||
{"HDEL", checksumStateKey, objectKey}
|
||||
}, Prio::RuntimeStateSync);
|
||||
EnqueueRelationsDeletion(GetObjectIdentifier(checkable), {{configStateKey, checksumStateKey}});
|
||||
EnqueueConfigObject(object, icingadb::task_queue::ConfigDelete | icingadb::task_queue::NextUpdate); // Send also ZREM for next update
|
||||
|
||||
if (service) {
|
||||
SendGroupsChanged<ServiceGroup>(checkable, service->GetGroups(), nullptr);
|
||||
|
|
@ -1934,6 +1826,8 @@ void IcingaDB::SendConfigDelete(const ConfigObject::Ptr& object)
|
|||
return;
|
||||
}
|
||||
|
||||
EnqueueConfigObject(object, icingadb::task_queue::ConfigDelete);
|
||||
|
||||
if (type == TimePeriod::TypeInstance) {
|
||||
TimePeriod::Ptr timeperiod = static_pointer_cast<TimePeriod>(object);
|
||||
SendTimePeriodRangesChanged(timeperiod, timeperiod->GetRanges(), nullptr);
|
||||
|
|
@ -1994,7 +1888,7 @@ void IcingaDB::SendStateChange(const ConfigObject::Ptr& object, const CheckResul
|
|||
|
||||
tie(host, service) = GetHostService(checkable);
|
||||
|
||||
UpdateState(checkable, StateUpdate::RuntimeOnly);
|
||||
EnqueueConfigObject(checkable, icingadb::task_queue::RuntimeState);
|
||||
|
||||
int hard_state{};
|
||||
if (!cr) {
|
||||
|
|
@ -2175,7 +2069,7 @@ void IcingaDB::SendStartedDowntime(const Downtime::Ptr& downtime)
|
|||
return;
|
||||
}
|
||||
|
||||
SendConfigUpdate(downtime, true);
|
||||
EnqueueConfigObject(downtime, icingadb::task_queue::ConfigUpdate);
|
||||
|
||||
auto checkable (downtime->GetCheckable());
|
||||
auto triggeredBy (Downtime::GetByName(downtime->GetTriggeredBy()));
|
||||
|
|
@ -2185,7 +2079,7 @@ void IcingaDB::SendStartedDowntime(const Downtime::Ptr& downtime)
|
|||
tie(host, service) = GetHostService(checkable);
|
||||
|
||||
/* Update checkable state as in_downtime may have changed. */
|
||||
UpdateState(checkable, StateUpdate::Full);
|
||||
EnqueueConfigObject(checkable, icingadb::task_queue::FullState);
|
||||
|
||||
RedisConnection::Query xAdd ({
|
||||
"XADD", "icinga:history:stream:downtime", "*",
|
||||
|
|
@ -2274,7 +2168,7 @@ void IcingaDB::SendRemovedDowntime(const Downtime::Ptr& downtime)
|
|||
return;
|
||||
|
||||
/* Update checkable state as in_downtime may have changed. */
|
||||
UpdateState(checkable, StateUpdate::Full);
|
||||
EnqueueConfigObject(checkable, icingadb::task_queue::FullState);
|
||||
|
||||
RedisConnection::Query xAdd ({
|
||||
"XADD", "icinga:history:stream:downtime", "*",
|
||||
|
|
@ -2362,6 +2256,9 @@ void IcingaDB::SendAddedComment(const Comment::Ptr& comment)
|
|||
Service::Ptr service;
|
||||
tie(host, service) = GetHostService(checkable);
|
||||
|
||||
// Update the checkable state to so that the "last_comment_id" is correctly reflected.
|
||||
EnqueueConfigObject(checkable, icingadb::task_queue::FullState);
|
||||
|
||||
RedisConnection::Query xAdd ({
|
||||
"XADD", "icinga:history:stream:comment", "*",
|
||||
"comment_id", GetObjectIdentifier(comment),
|
||||
|
|
@ -2404,7 +2301,6 @@ void IcingaDB::SendAddedComment(const Comment::Ptr& comment)
|
|||
}
|
||||
|
||||
m_HistoryBulker.ProduceOne(std::move(xAdd));
|
||||
UpdateState(checkable, StateUpdate::Full);
|
||||
}
|
||||
|
||||
void IcingaDB::SendRemovedComment(const Comment::Ptr& comment)
|
||||
|
|
@ -2434,6 +2330,9 @@ void IcingaDB::SendRemovedComment(const Comment::Ptr& comment)
|
|||
Service::Ptr service;
|
||||
tie(host, service) = GetHostService(checkable);
|
||||
|
||||
// Update the checkable state to so that the "last_comment_id" is correctly reflected.
|
||||
EnqueueConfigObject(checkable, icingadb::task_queue::FullState);
|
||||
|
||||
RedisConnection::Query xAdd ({
|
||||
"XADD", "icinga:history:stream:comment", "*",
|
||||
"comment_id", GetObjectIdentifier(comment),
|
||||
|
|
@ -2484,7 +2383,6 @@ void IcingaDB::SendRemovedComment(const Comment::Ptr& comment)
|
|||
}
|
||||
|
||||
m_HistoryBulker.ProduceOne(std::move(xAdd));
|
||||
UpdateState(checkable, StateUpdate::Full);
|
||||
}
|
||||
|
||||
void IcingaDB::SendFlappingChange(const Checkable::Ptr& checkable, double changeTime, double flappingLastChange)
|
||||
|
|
@ -2554,29 +2452,27 @@ void IcingaDB::SendFlappingChange(const Checkable::Ptr& checkable, double change
|
|||
m_HistoryBulker.ProduceOne(std::move(xAdd));
|
||||
}
|
||||
|
||||
void IcingaDB::SendNextUpdate(const Checkable::Ptr& checkable)
|
||||
void IcingaDB::SendNextUpdate(const Checkable::Ptr& checkable) const
|
||||
{
|
||||
if (!m_Rcon || !m_Rcon->IsConnected())
|
||||
if (!m_RconWorker || !m_RconWorker->IsConnected())
|
||||
return;
|
||||
|
||||
if (checkable->GetEnableActiveChecks()) {
|
||||
m_Rcon->FireAndForgetQuery(
|
||||
if (checkable->GetEnableActiveChecks() && !checkable->GetExtension("ConfigObjectDeleted")) {
|
||||
m_RconWorker->FireAndForgetQuery(
|
||||
{
|
||||
"ZADD",
|
||||
dynamic_pointer_cast<Service>(checkable) ? "icinga:nextupdate:service" : "icinga:nextupdate:host",
|
||||
Convert::ToString(checkable->GetNextUpdate()),
|
||||
GetObjectIdentifier(checkable)
|
||||
},
|
||||
Prio::CheckResult
|
||||
}
|
||||
);
|
||||
} else {
|
||||
m_Rcon->FireAndForgetQuery(
|
||||
m_RconWorker->FireAndForgetQuery(
|
||||
{
|
||||
"ZREM",
|
||||
dynamic_pointer_cast<Service>(checkable) ? "icinga:nextupdate:service" : "icinga:nextupdate:host",
|
||||
GetObjectIdentifier(checkable)
|
||||
},
|
||||
Prio::CheckResult
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
@ -2592,7 +2488,7 @@ void IcingaDB::SendAcknowledgementSet(const Checkable::Ptr& checkable, const Str
|
|||
tie(host, service) = GetHostService(checkable);
|
||||
|
||||
/* Update checkable state as is_acknowledged may have changed. */
|
||||
UpdateState(checkable, StateUpdate::Full);
|
||||
EnqueueConfigObject(checkable, icingadb::task_queue::FullState);
|
||||
|
||||
RedisConnection::Query xAdd ({
|
||||
"XADD", "icinga:history:stream:acknowledgement", "*",
|
||||
|
|
@ -2650,7 +2546,7 @@ void IcingaDB::SendAcknowledgementCleared(const Checkable::Ptr& checkable, const
|
|||
tie(host, service) = GetHostService(checkable);
|
||||
|
||||
/* Update checkable state as is_acknowledged may have changed. */
|
||||
UpdateState(checkable, StateUpdate::Full);
|
||||
EnqueueConfigObject(checkable, icingadb::task_queue::FullState);
|
||||
|
||||
RedisConnection::Query xAdd ({
|
||||
"XADD", "icinga:history:stream:acknowledgement", "*",
|
||||
|
|
@ -2742,7 +2638,7 @@ void IcingaDB::ForwardHistoryEntries()
|
|||
|
||||
if (m_Rcon && m_Rcon->IsConnected()) {
|
||||
try {
|
||||
m_Rcon->GetResultsOfQueries(haystack, Prio::History, {0, 0, haystack.size()});
|
||||
m_Rcon->GetResultsOfQueries(haystack, {0, 0, haystack.size()});
|
||||
break;
|
||||
} catch (const std::exception& ex) {
|
||||
logFailure(ex.what());
|
||||
|
|
@ -2767,7 +2663,7 @@ void IcingaDB::ForwardHistoryEntries()
|
|||
}
|
||||
|
||||
void IcingaDB::SendNotificationUsersChanged(const Notification::Ptr& notification, const Array::Ptr& oldValues, const Array::Ptr& newValues) {
|
||||
if (!m_Rcon || !m_Rcon->IsConnected() || oldValues == newValues) {
|
||||
if (!m_RconWorker || !m_RconWorker->IsConnected() || oldValues == newValues) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -2775,13 +2671,15 @@ void IcingaDB::SendNotificationUsersChanged(const Notification::Ptr& notificatio
|
|||
|
||||
for (const auto& userName : deletedUsers) {
|
||||
String id = HashValue(new Array({m_EnvironmentId, "user", userName, notification->GetName()}));
|
||||
DeleteRelationship(id, CONFIG_REDIS_KEY_PREFIX "notification:user");
|
||||
DeleteRelationship(id, CONFIG_REDIS_KEY_PREFIX "notification:recipient");
|
||||
EnqueueRelationsDeletion(id,{
|
||||
{CONFIG_REDIS_KEY_PREFIX "notification:user", ""},
|
||||
{CONFIG_REDIS_KEY_PREFIX "notification:recipient", ""},
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
void IcingaDB::SendNotificationUserGroupsChanged(const Notification::Ptr& notification, const Array::Ptr& oldValues, const Array::Ptr& newValues) {
|
||||
if (!m_Rcon || !m_Rcon->IsConnected() || oldValues == newValues) {
|
||||
if (!m_RconWorker || !m_RconWorker->IsConnected() || oldValues == newValues) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -2790,18 +2688,20 @@ void IcingaDB::SendNotificationUserGroupsChanged(const Notification::Ptr& notifi
|
|||
for (const auto& userGroupName : deletedUserGroups) {
|
||||
UserGroup::Ptr userGroup = UserGroup::GetByName(userGroupName);
|
||||
String id = HashValue(new Array({m_EnvironmentId, "usergroup", userGroupName, notification->GetName()}));
|
||||
DeleteRelationship(id, CONFIG_REDIS_KEY_PREFIX "notification:usergroup");
|
||||
DeleteRelationship(id, CONFIG_REDIS_KEY_PREFIX "notification:recipient");
|
||||
EnqueueRelationsDeletion(id, {
|
||||
{CONFIG_REDIS_KEY_PREFIX "notification:usergroup", ""},
|
||||
{CONFIG_REDIS_KEY_PREFIX "notification:recipient", ""}
|
||||
});
|
||||
|
||||
for (const User::Ptr& user : userGroup->GetMembers()) {
|
||||
String userId = HashValue(new Array({m_EnvironmentId, "usergroupuser", user->GetName(), userGroupName, notification->GetName()}));
|
||||
DeleteRelationship(userId, CONFIG_REDIS_KEY_PREFIX "notification:recipient");
|
||||
EnqueueRelationsDeletion(userId, {{CONFIG_REDIS_KEY_PREFIX "notification:recipient", ""}});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void IcingaDB::SendTimePeriodRangesChanged(const TimePeriod::Ptr& timeperiod, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues) {
|
||||
if (!m_Rcon || !m_Rcon->IsConnected() || oldValues == newValues) {
|
||||
if (!m_RconWorker || !m_RconWorker->IsConnected() || oldValues == newValues) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -2809,12 +2709,12 @@ void IcingaDB::SendTimePeriodRangesChanged(const TimePeriod::Ptr& timeperiod, co
|
|||
|
||||
for (const auto& rangeKey : deletedKeys) {
|
||||
String id = HashValue(new Array({m_EnvironmentId, rangeKey, oldValues->Get(rangeKey), timeperiod->GetName()}));
|
||||
DeleteRelationship(id, CONFIG_REDIS_KEY_PREFIX "timeperiod:range");
|
||||
EnqueueRelationsDeletion(id, {{CONFIG_REDIS_KEY_PREFIX "timeperiod:range", ""}});
|
||||
}
|
||||
}
|
||||
|
||||
void IcingaDB::SendTimePeriodIncludesChanged(const TimePeriod::Ptr& timeperiod, const Array::Ptr& oldValues, const Array::Ptr& newValues) {
|
||||
if (!m_Rcon || !m_Rcon->IsConnected() || oldValues == newValues) {
|
||||
if (!m_RconWorker || !m_RconWorker->IsConnected() || oldValues == newValues) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -2822,12 +2722,12 @@ void IcingaDB::SendTimePeriodIncludesChanged(const TimePeriod::Ptr& timeperiod,
|
|||
|
||||
for (const auto& includeName : deletedIncludes) {
|
||||
String id = HashValue(new Array({m_EnvironmentId, includeName, timeperiod->GetName()}));
|
||||
DeleteRelationship(id, CONFIG_REDIS_KEY_PREFIX "timeperiod:override:include");
|
||||
EnqueueRelationsDeletion(id, {{CONFIG_REDIS_KEY_PREFIX "timeperiod:override:include", ""}});
|
||||
}
|
||||
}
|
||||
|
||||
void IcingaDB::SendTimePeriodExcludesChanged(const TimePeriod::Ptr& timeperiod, const Array::Ptr& oldValues, const Array::Ptr& newValues) {
|
||||
if (!m_Rcon || !m_Rcon->IsConnected() || oldValues == newValues) {
|
||||
if (!m_RconWorker || !m_RconWorker->IsConnected() || oldValues == newValues) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -2835,13 +2735,13 @@ void IcingaDB::SendTimePeriodExcludesChanged(const TimePeriod::Ptr& timeperiod,
|
|||
|
||||
for (const auto& excludeName : deletedExcludes) {
|
||||
String id = HashValue(new Array({m_EnvironmentId, excludeName, timeperiod->GetName()}));
|
||||
DeleteRelationship(id, CONFIG_REDIS_KEY_PREFIX "timeperiod:override:exclude");
|
||||
EnqueueRelationsDeletion(id, {{CONFIG_REDIS_KEY_PREFIX "timeperiod:override:exclude", ""}});
|
||||
}
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
void IcingaDB::SendGroupsChanged(const ConfigObject::Ptr& object, const Array::Ptr& oldValues, const Array::Ptr& newValues) {
|
||||
if (!m_Rcon || !m_Rcon->IsConnected() || oldValues == newValues) {
|
||||
if (!m_RconWorker || !m_RconWorker->IsConnected() || oldValues == newValues) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -2859,14 +2759,14 @@ void IcingaDB::SendGroupsChanged(const ConfigObject::Ptr& object, const Array::P
|
|||
for (const auto& groupName : deletedGroups) {
|
||||
typename T::Ptr group = ConfigObject::GetObject<T>(groupName);
|
||||
String id = HashValue(new Array({m_EnvironmentId, group->GetName(), object->GetName()}));
|
||||
DeleteRelationship(id, keyType);
|
||||
EnqueueRelationsDeletion(id, {{keyType, ""}});
|
||||
|
||||
if (std::is_same<T, UserGroup>::value) {
|
||||
if constexpr (std::is_same_v<T, UserGroup>) {
|
||||
UserGroup::Ptr userGroup = dynamic_pointer_cast<UserGroup>(group);
|
||||
|
||||
for (const auto& notification : userGroup->GetNotifications()) {
|
||||
String userId = HashValue(new Array({m_EnvironmentId, "usergroupuser", object->GetName(), groupName, notification->GetName()}));
|
||||
DeleteRelationship(userId, CONFIG_REDIS_KEY_PREFIX "notification:recipient");
|
||||
EnqueueRelationsDeletion(userId, {{CONFIG_REDIS_KEY_PREFIX "notification:recipient", ""}});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -2879,13 +2779,13 @@ void IcingaDB::SendCommandEnvChanged(
|
|||
const Dictionary::Ptr& newValues
|
||||
)
|
||||
{
|
||||
if (!m_Rcon || !m_Rcon->IsConnected() || oldValues == newValues) {
|
||||
if (!m_RconWorker || !m_RconWorker->IsConnected() || oldValues == newValues) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (const auto& envvarKey : GetDictionaryDeletedKeys(oldValues, newValues)) {
|
||||
String id = HashValue(new Array({m_EnvironmentId, envvarKey, command->GetName()}));
|
||||
DeleteRelationship(id, cmdRedisKeys.EnvObjectKey, cmdRedisKeys.EnvChecksumKey);
|
||||
EnqueueRelationsDeletion(id, {{cmdRedisKeys.EnvObjectKey, cmdRedisKeys.EnvChecksumKey}});
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2896,13 +2796,13 @@ void IcingaDB::SendCommandArgumentsChanged(
|
|||
const Dictionary::Ptr& newValues
|
||||
)
|
||||
{
|
||||
if (!m_Rcon || !m_Rcon->IsConnected() || oldValues == newValues) {
|
||||
if (!m_RconWorker || !m_RconWorker->IsConnected() || oldValues == newValues) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (const auto& argumentKey : GetDictionaryDeletedKeys(oldValues, newValues)) {
|
||||
String id = HashValue(new Array({m_EnvironmentId, argumentKey, command->GetName()}));
|
||||
DeleteRelationship(id, cmdRedisKeys.ArgObjectKey, cmdRedisKeys.ArgChecksumKey);
|
||||
EnqueueRelationsDeletion(id, {{cmdRedisKeys.ArgObjectKey, cmdRedisKeys.ArgChecksumKey}});
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2913,7 +2813,7 @@ void IcingaDB::SendCustomVarsChanged(const ConfigObject::Ptr& object, const Dict
|
|||
return;
|
||||
}
|
||||
|
||||
if (!m_Rcon || !m_Rcon->IsConnected() || oldValues == newValues) {
|
||||
if (!m_RconWorker || !m_RconWorker->IsConnected() || oldValues == newValues) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -2922,99 +2822,7 @@ void IcingaDB::SendCustomVarsChanged(const ConfigObject::Ptr& object, const Dict
|
|||
|
||||
for (const auto& varId : GetDictionaryDeletedKeys(oldVars, newVars)) {
|
||||
String id = HashValue(new Array({m_EnvironmentId, varId, object->GetName()}));
|
||||
DeleteRelationship(id, customvarKey);
|
||||
}
|
||||
}
|
||||
|
||||
void IcingaDB::SendDependencyGroupChildRegistered(const Checkable::Ptr& child, const DependencyGroup::Ptr& dependencyGroup)
|
||||
{
|
||||
if (!m_Rcon || !m_Rcon->IsConnected()) {
|
||||
return;
|
||||
}
|
||||
|
||||
std::vector<Dictionary::Ptr> runtimeUpdates;
|
||||
std::map<RedisConnection::QueryArg, RedisConnection::Query> hMSets;
|
||||
InsertCheckableDependencies(child, hMSets, &runtimeUpdates, dependencyGroup);
|
||||
ExecuteRedisTransaction(m_Rcon, hMSets, runtimeUpdates);
|
||||
|
||||
UpdateState(child, StateUpdate::Full);
|
||||
UpdateDependenciesState(child, dependencyGroup);
|
||||
|
||||
std::set<Checkable::Ptr> parents;
|
||||
dependencyGroup->LoadParents(parents);
|
||||
for (const auto& parent : parents) {
|
||||
// The total_children and affects_children columns might now have different outcome, so update the parent
|
||||
// Checkable as well. The grandparent Checkable may still have wrong numbers of total children, though it's not
|
||||
// worth traversing the whole tree way up and sending config updates for each one of them, as the next Redis
|
||||
// config dump is going to fix it anyway.
|
||||
SendConfigUpdate(parent, true);
|
||||
}
|
||||
}
|
||||
|
||||
void IcingaDB::SendDependencyGroupChildRemoved(
|
||||
const DependencyGroup::Ptr& dependencyGroup,
|
||||
const std::vector<Dependency::Ptr>& dependencies,
|
||||
bool removeGroup
|
||||
)
|
||||
{
|
||||
if (!m_Rcon || !m_Rcon->IsConnected() || dependencies.empty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
Checkable::Ptr child;
|
||||
std::set<Checkable*> detachedParents;
|
||||
for (const auto& dependency : dependencies) {
|
||||
child = dependency->GetChild(); // All dependencies have the same child.
|
||||
const auto& parent(dependency->GetParent());
|
||||
if (auto [_, inserted] = detachedParents.insert(dependency->GetParent().get()); inserted) {
|
||||
String edgeId;
|
||||
if (dependencyGroup->IsRedundancyGroup()) {
|
||||
// If the redundancy group has no members left, it's going to be removed as well, so we need to
|
||||
// delete dependency edges from that group to the parent Checkables.
|
||||
if (removeGroup) {
|
||||
auto id(HashValue(new Array{dependencyGroup->GetIcingaDBIdentifier(), GetObjectIdentifier(parent)}));
|
||||
DeleteRelationship(id, CONFIG_REDIS_KEY_PREFIX "dependency:edge");
|
||||
DeleteState(id, CONFIG_REDIS_KEY_PREFIX "dependency:edge:state");
|
||||
}
|
||||
|
||||
// Remove the connection from the child Checkable to the redundancy group.
|
||||
edgeId = HashValue(new Array{GetObjectIdentifier(child), dependencyGroup->GetIcingaDBIdentifier()});
|
||||
} else {
|
||||
// Remove the edge between the parent and child Checkable linked through the removed dependency.
|
||||
edgeId = HashValue(new Array{GetObjectIdentifier(child), GetObjectIdentifier(parent)});
|
||||
}
|
||||
|
||||
DeleteRelationship(edgeId, CONFIG_REDIS_KEY_PREFIX "dependency:edge");
|
||||
|
||||
// The total_children and affects_children columns might now have different outcome, so update the parent
|
||||
// Checkable as well. The grandparent Checkable may still have wrong numbers of total children, though it's
|
||||
// not worth traversing the whole tree way up and sending config updates for each one of them, as the next
|
||||
// Redis config dump is going to fix it anyway.
|
||||
SendConfigUpdate(parent, true);
|
||||
|
||||
if (!parent->HasAnyDependencies()) {
|
||||
// If the parent Checkable isn't part of any other dependency chain anymore, drop its dependency node entry.
|
||||
DeleteRelationship(GetObjectIdentifier(parent), CONFIG_REDIS_KEY_PREFIX "dependency:node");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (removeGroup && dependencyGroup->IsRedundancyGroup()) {
|
||||
String redundancyGroupId(dependencyGroup->GetIcingaDBIdentifier());
|
||||
DeleteRelationship(redundancyGroupId, CONFIG_REDIS_KEY_PREFIX "dependency:node");
|
||||
DeleteRelationship(redundancyGroupId, CONFIG_REDIS_KEY_PREFIX "redundancygroup");
|
||||
|
||||
DeleteState(redundancyGroupId, CONFIG_REDIS_KEY_PREFIX "redundancygroup:state");
|
||||
DeleteState(redundancyGroupId, CONFIG_REDIS_KEY_PREFIX "dependency:edge:state");
|
||||
} else if (removeGroup) {
|
||||
// Note: The Icinga DB identifier of a non-redundant dependency group is used as the edge state ID
|
||||
// and shared by all of its dependency objects. See also SerializeDependencyEdgeState() for details.
|
||||
DeleteState(dependencyGroup->GetIcingaDBIdentifier(), CONFIG_REDIS_KEY_PREFIX "dependency:edge:state");
|
||||
}
|
||||
|
||||
if (!child->HasAnyDependencies()) {
|
||||
// If the child Checkable has no parent and reverse dependencies, we can safely remove the dependency node.
|
||||
DeleteRelationship(GetObjectIdentifier(child), CONFIG_REDIS_KEY_PREFIX "dependency:node");
|
||||
EnqueueRelationsDeletion(id, {{customvarKey, ""}});
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -3146,49 +2954,6 @@ Dictionary::Ptr IcingaDB::SerializeState(const Checkable::Ptr& checkable)
|
|||
return attrs;
|
||||
}
|
||||
|
||||
std::vector<String>
|
||||
IcingaDB::UpdateObjectAttrs(const ConfigObject::Ptr& object, int fieldType,
|
||||
const String& typeNameOverride)
|
||||
{
|
||||
Type::Ptr type = object->GetReflectionType();
|
||||
Dictionary::Ptr attrs(new Dictionary);
|
||||
|
||||
for (int fid = 0; fid < type->GetFieldCount(); fid++) {
|
||||
Field field = type->GetFieldInfo(fid);
|
||||
|
||||
if ((field.Attributes & fieldType) == 0)
|
||||
continue;
|
||||
|
||||
Value val = object->GetField(fid);
|
||||
|
||||
/* hide attributes which shouldn't be user-visible */
|
||||
if (field.Attributes & FANoUserView)
|
||||
continue;
|
||||
|
||||
/* hide internal navigation fields */
|
||||
if (field.Attributes & FANavigation && !(field.Attributes & (FAConfig | FAState)))
|
||||
continue;
|
||||
|
||||
attrs->Set(field.Name, Serialize(val));
|
||||
}
|
||||
|
||||
/* Downtimes require in_effect, which is not an attribute */
|
||||
Downtime::Ptr downtime = dynamic_pointer_cast<Downtime>(object);
|
||||
if (downtime) {
|
||||
attrs->Set("in_effect", Serialize(downtime->IsInEffect()));
|
||||
attrs->Set("trigger_time", Serialize(TimestampToMilliseconds(downtime->GetTriggerTime())));
|
||||
}
|
||||
|
||||
|
||||
/* Use the name checksum as unique key. */
|
||||
String typeName = type->GetName().ToLower();
|
||||
if (!typeNameOverride.IsEmpty())
|
||||
typeName = typeNameOverride.ToLower();
|
||||
|
||||
return {GetObjectIdentifier(object), JsonEncode(attrs)};
|
||||
//m_Rcon->FireAndForgetQuery({"HSET", keyPrefix + typeName, GetObjectIdentifier(object), JsonEncode(attrs)});
|
||||
}
|
||||
|
||||
void IcingaDB::StateChangeHandler(const ConfigObject::Ptr& object, const CheckResult::Ptr& cr, StateType type)
|
||||
{
|
||||
for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
|
||||
|
|
@ -3199,10 +2964,11 @@ void IcingaDB::StateChangeHandler(const ConfigObject::Ptr& object, const CheckRe
|
|||
void IcingaDB::ReachabilityChangeHandler(const std::set<Checkable::Ptr>& children)
|
||||
{
|
||||
for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
|
||||
std::set<DependencyGroup*> seenGroups;
|
||||
for (auto& checkable : children) {
|
||||
rw->UpdateState(checkable, StateUpdate::Full);
|
||||
rw->UpdateDependenciesState(checkable, nullptr, &seenGroups);
|
||||
rw->EnqueueConfigObject(checkable, icingadb::task_queue::FullState);
|
||||
for (const auto& dependencyGroup : checkable->GetDependencyGroups()) {
|
||||
rw->EnqueueDependencyGroupStateUpdate(dependencyGroup);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -3218,17 +2984,13 @@ void IcingaDB::VersionChangedHandler(const ConfigObject::Ptr& object)
|
|||
}
|
||||
|
||||
if (object->IsActive()) {
|
||||
// Create or update the object config
|
||||
for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
|
||||
if (rw)
|
||||
rw->SendConfigUpdate(object, true);
|
||||
// A runtime config change triggers also a full state update as well as next update event.
|
||||
rw->EnqueueConfigObject(object, icingadb::task_queue::ConfigUpdate | icingadb::task_queue::FullState | icingadb::task_queue::NextUpdate);
|
||||
}
|
||||
} else if (!object->IsActive() &&
|
||||
object->GetExtension("ConfigObjectDeleted")) { // same as in apilistener-configsync.cpp
|
||||
// Delete object config
|
||||
} else if (!object->IsActive() && object->GetExtension("ConfigObjectDeleted")) { // same as in apilistener-configsync.cpp
|
||||
for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
|
||||
if (rw)
|
||||
rw->SendConfigDelete(object);
|
||||
rw->SendConfigDelete(object);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -3288,37 +3050,47 @@ void IcingaDB::FlappingChangeHandler(const Checkable::Ptr& checkable, double cha
|
|||
void IcingaDB::NewCheckResultHandler(const Checkable::Ptr& checkable)
|
||||
{
|
||||
for (auto& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
|
||||
rw->UpdateState(checkable, StateUpdate::Volatile);
|
||||
rw->SendNextUpdate(checkable);
|
||||
rw->EnqueueConfigObject(checkable, icingadb::task_queue::VolatileState);
|
||||
}
|
||||
}
|
||||
|
||||
void IcingaDB::NextCheckUpdatedHandler(const Checkable::Ptr& checkable)
|
||||
void IcingaDB::NextCheckChangedHandler(const Checkable::Ptr& checkable)
|
||||
{
|
||||
for (auto& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
|
||||
rw->UpdateState(checkable, StateUpdate::Volatile);
|
||||
rw->SendNextUpdate(checkable);
|
||||
rw->EnqueueConfigObject(checkable, icingadb::task_queue::VolatileState | icingadb::task_queue::NextUpdate);
|
||||
}
|
||||
}
|
||||
|
||||
void IcingaDB::DependencyGroupChildRegisteredHandler(const Checkable::Ptr& child, const DependencyGroup::Ptr& dependencyGroup)
|
||||
{
|
||||
for (const auto& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
|
||||
rw->SendDependencyGroupChildRegistered(child, dependencyGroup);
|
||||
rw->EnqueueConfigObject(child, icingadb::task_queue::FullState); // Child requires a full state update.
|
||||
rw->EnqueueDependencyChildRegistered(dependencyGroup, child);
|
||||
rw->EnqueueDependencyGroupStateUpdate(dependencyGroup);
|
||||
|
||||
std::set<Checkable::Ptr> parents;
|
||||
dependencyGroup->LoadParents(parents);
|
||||
for (const auto& parent : parents) {
|
||||
// The total_children and affects_children columns might now have different outcome, so update the parent
|
||||
// Checkable as well. The grandparent Checkable may still have wrong numbers of total children, though it's
|
||||
// not worth traversing the whole tree way up and sending config updates for each one of them, as the next
|
||||
// Redis config dump is going to fix it anyway.
|
||||
rw->EnqueueConfigObject(parent, icingadb::task_queue::ConfigUpdate | icingadb::task_queue::FullState);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void IcingaDB::DependencyGroupChildRemovedHandler(const DependencyGroup::Ptr& dependencyGroup, const std::vector<Dependency::Ptr>& dependencies, bool removeGroup)
|
||||
{
|
||||
for (const auto& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
|
||||
rw->SendDependencyGroupChildRemoved(dependencyGroup, dependencies, removeGroup);
|
||||
rw->EnqueueDependencyChildRemoved(dependencyGroup, dependencies, removeGroup);
|
||||
}
|
||||
}
|
||||
|
||||
void IcingaDB::HostProblemChangedHandler(const Service::Ptr& service) {
|
||||
for (auto& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
|
||||
/* Host state changes affect is_handled and severity of services. */
|
||||
rw->UpdateState(service, StateUpdate::Full);
|
||||
rw->EnqueueConfigObject(service, icingadb::task_queue::FullState);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -3431,7 +3203,7 @@ void IcingaDB::DeleteRelationship(const String& id, RedisConnection::QueryArg re
|
|||
"redis_key", std::move(redisObjKey), "id", id, "runtime_type", "delete"
|
||||
});
|
||||
|
||||
m_Rcon->FireAndForgetQueries(queries, Prio::Config);
|
||||
m_RconWorker->FireAndForgetQueries(queries);
|
||||
}
|
||||
|
||||
void IcingaDB::DeleteState(const String& id, RedisConnection::QueryArg redisObjKey, RedisConnection::QueryArg redisChecksumKey) const
|
||||
|
|
@ -3444,7 +3216,7 @@ void IcingaDB::DeleteState(const String& id, RedisConnection::QueryArg redisObjK
|
|||
hdels.push_back({"HDEL", std::move(redisChecksumKey), id});
|
||||
}
|
||||
|
||||
m_Rcon->FireAndForgetQueries(std::move(hdels), Prio::RuntimeStateSync);
|
||||
m_RconWorker->FireAndForgetQueries(std::move(hdels));
|
||||
// TODO: This is currently purposefully commented out due to how Icinga DB (Go) handles runtime state
|
||||
// upsert and delete events. See https://github.com/Icinga/icingadb/pull/894 for more details.
|
||||
/*m_Rcon->FireAndForgetQueries({{
|
||||
|
|
@ -3454,7 +3226,7 @@ void IcingaDB::DeleteState(const String& id, RedisConnection::QueryArg redisObjK
|
|||
}
|
||||
|
||||
/**
|
||||
* Add the provided data to the Redis HMSETs map.
|
||||
* Add the provided data to the provided map of HMSET queries.
|
||||
*
|
||||
* @param hMSets The map of HMSETs to add the provided data to.
|
||||
* @param redisKey The Redis key to which the HMSET query should be added.
|
||||
|
|
@ -3513,11 +3285,11 @@ void IcingaDB::ExecuteRedisTransaction(const RedisConnection::Ptr& rcon,
|
|||
if (transaction.size() > 1) {
|
||||
transaction.emplace_back(RedisConnection::Query{"EXEC"});
|
||||
if (!runtimeUpdates.empty()) {
|
||||
rcon->FireAndForgetQueries(std::move(transaction), Prio::Config, {1});
|
||||
rcon->FireAndForgetQueries(std::move(transaction), {1});
|
||||
} else {
|
||||
// This is likely triggered by the initial Redis config dump, so a) we don't need to record the number of
|
||||
// affected objects and b) we don't really know how many objects are going to be affected by this tx.
|
||||
rcon->FireAndForgetQueries(std::move(transaction), Prio::Config);
|
||||
rcon->FireAndForgetQueries(std::move(transaction));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -26,6 +26,18 @@
|
|||
|
||||
using namespace icinga;
|
||||
|
||||
/**
|
||||
* Checks if the given Redis key is a state key (ends with ":state").
|
||||
*
|
||||
* @param key The Redis key to check.
|
||||
*
|
||||
* @return true if the key is a state key, false otherwise.
|
||||
*/
|
||||
bool IcingaDB::IsStateKey(const RedisConnection::QueryArg& key)
|
||||
{
|
||||
return boost::algorithm::ends_with(static_cast<std::string_view>(key), ":state");
|
||||
}
|
||||
|
||||
String IcingaDB::FormatCheckSumBinary(const String& str)
|
||||
{
|
||||
char output[20*2+1];
|
||||
|
|
@ -170,30 +182,8 @@ Dictionary::Ptr IcingaDB::SerializeVars(const Dictionary::Ptr& vars)
|
|||
*/
|
||||
Dictionary::Ptr IcingaDB::SerializeDependencyEdgeState(const DependencyGroup::Ptr& dependencyGroup, const Dependency::Ptr& dep)
|
||||
{
|
||||
String edgeStateId;
|
||||
// The edge state ID is computed a bit differently depending on whether this is for a redundancy group or not.
|
||||
// For redundancy groups, the state ID is supposed to represent the connection state between the redundancy group
|
||||
// and the parent Checkable of the given dependency. Hence, the outcome will always be different for each parent
|
||||
// Checkable of the redundancy group.
|
||||
if (dependencyGroup->IsRedundancyGroup()) {
|
||||
edgeStateId = HashValue(new Array{
|
||||
dependencyGroup->GetIcingaDBIdentifier(),
|
||||
GetObjectIdentifier(dep->GetParent()),
|
||||
});
|
||||
} else if (dependencyGroup->GetIcingaDBIdentifier().IsEmpty()) {
|
||||
// For non-redundant dependency groups, on the other hand, all dependency objects within that group will
|
||||
// always have the same parent Checkable. Likewise, the state ID will be always the same as well it doesn't
|
||||
// matter which dependency object is used to compute it. Therefore, it's sufficient to compute it only once
|
||||
// and all the other dependency objects can reuse the cached state ID.
|
||||
edgeStateId = HashValue(new Array{dependencyGroup->GetCompositeKey(), GetObjectIdentifier(dep->GetParent())});
|
||||
dependencyGroup->SetIcingaDBIdentifier(edgeStateId);
|
||||
} else {
|
||||
// Use the already computed state ID for the dependency group.
|
||||
edgeStateId = dependencyGroup->GetIcingaDBIdentifier();
|
||||
}
|
||||
|
||||
return new Dictionary{
|
||||
{"id", std::move(edgeStateId)},
|
||||
{"id", GetDependencyEdgeStateId(dependencyGroup, dep)},
|
||||
{"environment_id", m_EnvironmentId},
|
||||
{"failed", !dep->IsAvailable(DependencyState) || !dep->GetParent()->IsReachable()}
|
||||
};
|
||||
|
|
@ -220,6 +210,42 @@ Dictionary::Ptr IcingaDB::SerializeRedundancyGroupState(const Checkable::Ptr& ch
|
|||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Computes the dependency edge state ID for the given dependency object.
|
||||
*
|
||||
* The edge state ID is computed a bit differently depending on whether this is for a redundancy group or not.
|
||||
* For redundancy groups, the state ID is supposed to represent the connection state between the redundancy group
|
||||
* and the parent Checkable of the given dependency. Hence, the outcome will always be different for each parent
|
||||
* Checkable of the redundancy group.
|
||||
*
|
||||
* For non-redundant dependency groups, on the other hand, all dependency objects within that group will
|
||||
* always have the same parent Checkable. Likewise, the state ID will be always the same as well it doesn't
|
||||
* matter which dependency object is used to compute it. Therefore, it's sufficient to compute it only once
|
||||
* and all the other dependency objects can reuse the cached state ID. Thus, this function will cache the just
|
||||
* computed state ID in the dependency group object itself for later reuse.
|
||||
*
|
||||
* @param dependencyGroup The dependency group the dependency is part of.
|
||||
* @param dep The dependency object to compute the state ID for.
|
||||
*
|
||||
* @return The computed edge state ID.
|
||||
*/
|
||||
String IcingaDB::GetDependencyEdgeStateId(const DependencyGroup::Ptr& dependencyGroup, const Dependency::Ptr& dep)
|
||||
{
|
||||
if (dependencyGroup->IsRedundancyGroup()) {
|
||||
return HashValue(new Array{
|
||||
dependencyGroup->GetIcingaDBIdentifier(),
|
||||
GetObjectIdentifier(dep->GetParent()),
|
||||
});
|
||||
}
|
||||
if (dependencyGroup->GetIcingaDBIdentifier().IsEmpty()) {
|
||||
auto edgeStateId = HashValue(new Array{dependencyGroup->GetCompositeKey(), GetObjectIdentifier(dep->GetParent())});
|
||||
dependencyGroup->SetIcingaDBIdentifier(edgeStateId);
|
||||
return edgeStateId;
|
||||
}
|
||||
// Use the already computed state ID for the dependency group.
|
||||
return dependencyGroup->GetIcingaDBIdentifier();
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts the given filter to its Redis value representation.
|
||||
*
|
||||
|
|
|
|||
419
lib/icingadb/icingadb-worker.cpp
Normal file
419
lib/icingadb/icingadb-worker.cpp
Normal file
|
|
@ -0,0 +1,419 @@
|
|||
// SPDX-FileCopyrightText: 2025 Icinga GmbH <https://icinga.com>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
#include "icingadb/icingadb.hpp"
|
||||
#include "base/logger.hpp"
|
||||
#include <vector>
|
||||
|
||||
using namespace icinga;
|
||||
|
||||
icingadb::task_queue::PendingConfigItem::PendingConfigItem(const ConfigObject::Ptr& obj, uint32_t bits)
|
||||
: Object{obj}, DirtyBits{bits & DirtyBitsAll}
|
||||
{
|
||||
}
|
||||
|
||||
icingadb::task_queue::PendingDependencyGroupStateItem::PendingDependencyGroupStateItem(const DependencyGroup::Ptr& depGroup)
|
||||
: DepGroup{depGroup}
|
||||
{
|
||||
}
|
||||
|
||||
icingadb::task_queue::PendingDependencyEdgeItem::PendingDependencyEdgeItem(const DependencyGroup::Ptr& depGroup, const Checkable::Ptr& child)
|
||||
: DepGroup{depGroup}, Child{child}
|
||||
{
|
||||
}
|
||||
|
||||
icingadb::task_queue::RelationsDeletionItem::RelationsDeletionItem(const String& id, const RelationsKeySet& relations)
|
||||
: ID{id}, Relations{relations}
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* Background worker thread procedure for processing pending items.
|
||||
*
|
||||
* This function runs in a separate thread and continuously processes pending items that have been
|
||||
* enqueued for Redis updates. It waits for new items to be added to the pending items container,
|
||||
* and processes them one at a time, ensuring that the Redis connection is active and not overloaded
|
||||
* with too many pending queries. The function also implements a delay mechanism to allow for potential
|
||||
* additional changes to be merged into the same item before processing it.
|
||||
*/
|
||||
void IcingaDB::PendingItemsThreadProc()
|
||||
{
|
||||
namespace ch = std::chrono;
|
||||
namespace queue = icingadb::task_queue;
|
||||
|
||||
// Limits the number of pending queries the Rcon can have at any given time to reduce the memory overhead to
|
||||
// the absolute minimum necessary, since the size of the pending queue items is much smaller than the size
|
||||
// of the actual Redis queries. Thus, this will slow down the worker thread a bit from generating too many
|
||||
// Redis queries when the Redis connection is saturated.
|
||||
constexpr std::size_t maxPendingQueries = 128;
|
||||
// The minimum age an item must have before it can be processed.
|
||||
constexpr ch::milliseconds minItemAge{300};
|
||||
|
||||
std::unique_lock lock(m_PendingItemsMutex);
|
||||
// Wait until the initial config dump is done. IcingaDB::OnConnectedHandler will notify us once it's finished.
|
||||
while (GetActive() && !m_ConfigDumpDone) m_PendingItemsCV.wait(lock);
|
||||
|
||||
auto& seqView = m_PendingItems.get<1>();
|
||||
while (GetActive()) {
|
||||
if (!m_PendingItems.empty() && m_RconWorker && m_RconWorker->IsConnected() && m_RconWorker->GetPendingQueryCount() < maxPendingQueries) {
|
||||
auto now = ch::steady_clock::now();
|
||||
auto it = seqView.begin();
|
||||
if (auto age = now - it->EnqueueTime; minItemAge > age) {
|
||||
m_PendingItemsCV.wait_for(lock, minItemAge - age);
|
||||
} else {
|
||||
auto itemToProcess = *it;
|
||||
seqView.erase(it);
|
||||
|
||||
lock.unlock();
|
||||
std::visit([this](auto &item) {
|
||||
try {
|
||||
ProcessQueueItem(item);
|
||||
} catch (const std::exception& ex) {
|
||||
Log(LogCritical, "IcingaDB")
|
||||
<< "Exception while processing pending item of type '" << typeid(decltype(item)).name()
|
||||
<< "': " << DiagnosticInformation(ex, GetActive());
|
||||
}
|
||||
}, itemToProcess.Item);
|
||||
lock.lock();
|
||||
}
|
||||
} else {
|
||||
// In case we don't receive any notification, we still want to wake up periodically on our own
|
||||
// to check whether we can proceed (e.g. the Redis connection might have become available again and there
|
||||
// was no activity on the pending items queue to trigger a notification). Thus, we use a timed wait here.
|
||||
m_PendingItemsCV.wait_for(lock, 100ms);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the pending configuration item.
|
||||
*
|
||||
* This function processes the pending configuration item by performing the necessary Redis operations based
|
||||
* on the dirty bits set for the associated configuration object. It handles configuration deletions, updates,
|
||||
* and state updates for checkable objects.
|
||||
*
|
||||
* @param item The queue item to process.
|
||||
*/
|
||||
void IcingaDB::ProcessQueueItem(const icingadb::task_queue::PendingConfigItem& item)
|
||||
{
|
||||
namespace queue = icingadb::task_queue;
|
||||
|
||||
ObjectLock olock(item.Object);
|
||||
|
||||
if (item.DirtyBits & queue::ConfigDelete) {
|
||||
auto redisKeyPair = GetSyncableTypeRedisKeys(item.Object->GetReflectionType());
|
||||
m_RconWorker->FireAndForgetQueries(
|
||||
{
|
||||
{"HDEL", redisKeyPair.ObjectKey, GetObjectIdentifier(item.Object)},
|
||||
{"HDEL", redisKeyPair.ChecksumKey, GetObjectIdentifier(item.Object)},
|
||||
{
|
||||
"XADD",
|
||||
"icinga:runtime",
|
||||
"MAXLEN",
|
||||
"~",
|
||||
"1000000",
|
||||
"*",
|
||||
"redis_key",
|
||||
redisKeyPair.ObjectKey,
|
||||
"id",
|
||||
GetObjectIdentifier(item.Object),
|
||||
"runtime_type",
|
||||
"delete"
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
if (item.DirtyBits & queue::ConfigUpdate) {
|
||||
std::map<RedisConnection::QueryArg, RedisConnection::Query> hMSets;
|
||||
std::vector<Dictionary::Ptr> runtimeUpdates;
|
||||
CreateConfigUpdate(item.Object, GetSyncableTypeRedisKeys(item.Object->GetReflectionType()), hMSets, runtimeUpdates, true);
|
||||
ExecuteRedisTransaction(m_RconWorker, hMSets, runtimeUpdates);
|
||||
}
|
||||
|
||||
if (auto checkable = dynamic_pointer_cast<Checkable>(item.Object); checkable) {
|
||||
if (item.DirtyBits & queue::FullState) {
|
||||
UpdateState(checkable, item.DirtyBits);
|
||||
}
|
||||
if (item.DirtyBits & queue::NextUpdate) {
|
||||
SendNextUpdate(checkable);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the pending dependency group state item.
|
||||
*
|
||||
* This function processes the pending dependency group state item by updating the state of the
|
||||
* dependency group in Redis. It selects any child checkable from the dependency group to initiate
|
||||
* the state update, as all children share the same dependency group state.
|
||||
*
|
||||
* @param item The queue item to process.
|
||||
*/
|
||||
void IcingaDB::ProcessQueueItem(const icingadb::task_queue::PendingDependencyGroupStateItem& item) const
|
||||
{
|
||||
// For dependency group state updates, we don't actually care which child triggered the update,
|
||||
// since all children share the same dependency group state. Thus, we can just pick any child to
|
||||
// start the update from.
|
||||
if (auto child = item.DepGroup->GetAnyChild(); child) {
|
||||
UpdateDependenciesState(child, item.DepGroup);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the pending dependency edge item.
|
||||
*
|
||||
* This function processes the pending dependency edge item and ensures that the necessary Redis
|
||||
* operations are performed to register the child checkable as part of the dependency group.
|
||||
*
|
||||
* @param item The queue item to process.
|
||||
*/
|
||||
void IcingaDB::ProcessQueueItem(const icingadb::task_queue::PendingDependencyEdgeItem& item)
|
||||
{
|
||||
ObjectLock olock(item.Child);
|
||||
|
||||
std::vector<Dictionary::Ptr> runtimeUpdates;
|
||||
std::map<RedisConnection::QueryArg, RedisConnection::Query> hMSets;
|
||||
InsertCheckableDependencies(item.Child, hMSets, &runtimeUpdates, item.DepGroup);
|
||||
ExecuteRedisTransaction(m_RconWorker, hMSets, runtimeUpdates);
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the pending relations deletion item.
|
||||
*
|
||||
* This function processes the pending relations deletion item by deleting the specified relations
|
||||
* from Redis. It iterates over the map of Redis keys and deletes the relations associated with
|
||||
* the given ID.
|
||||
*
|
||||
* @param item The queue item to process.
|
||||
*/
|
||||
void IcingaDB::ProcessQueueItem(const icingadb::task_queue::RelationsDeletionItem& item)
|
||||
{
|
||||
for (const auto& [configKey, checksumKey] : item.Relations) {
|
||||
if (IsStateKey(configKey)) {
|
||||
DeleteState(item.ID, configKey, checksumKey);
|
||||
} else {
|
||||
DeleteRelationship(item.ID, configKey, checksumKey);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Enqueue a configuration object for processing in the pending objects thread.
 *
 * If an item for the same object is already queued, the new dirty bits are merged into the
 * existing item instead of queueing a second one. If there is no active Redis connection
 * available, this function is a no-op.
 *
 * @param object The configuration object to be enqueued for processing.
 * @param bits The dirty bits indicating the type of changes to be processed for the object.
 */
void IcingaDB::EnqueueConfigObject(const ConfigObject::Ptr& object, uint32_t bits)
{
	namespace queue = icingadb::task_queue;

	if (!GetActive() || !m_RconWorker || !m_RconWorker->IsConnected()) {
		return; // No need to enqueue anything if we're not connected.
	}

	{
		std::lock_guard lock(m_PendingItemsMutex);
		// The first index of m_PendingItems is unique per object, so emplace() fails if an item
		// for this object is already pending; in that case, merge the new bits into it.
		if (auto [it, inserted] = m_PendingItems.emplace(queue::PendingConfigItem{object, bits}); !inserted) {
			m_PendingItems.modify(it, [bits](queue::PendingQueueItem& item) {
				auto& configItem = std::get<queue::PendingConfigItem>(item.Item);
				if (bits & queue::ConfigDelete) {
					// A deletion supersedes any pending config/state update for the same object.
					configItem.DirtyBits &= ~(queue::ConfigUpdate | queue::FullState);
				} else if (bits & queue::ConfigUpdate) {
					// Conversely, a (re-)creation/update cancels a pending deletion.
					configItem.DirtyBits &= ~queue::ConfigDelete;
				}
				// Mask the input so only well-known dirty bits ever end up in the queue item.
				configItem.DirtyBits |= bits & queue::DirtyBitsAll;
			});
		}
	}
	// Wake up the worker thread outside the lock so it doesn't immediately block on the mutex.
	m_PendingItemsCV.notify_one();
}
|
||||
|
||||
/**
 * Enqueue a dependency group state update for processing in the pending items thread.
 *
 * If there is no active Redis connection available, this function is a no-op.
 *
 * @param depGroup The dependency group whose state should be synced to Redis.
 */
void IcingaDB::EnqueueDependencyGroupStateUpdate(const DependencyGroup::Ptr& depGroup)
{
	if (!GetActive() || !m_RconWorker || !m_RconWorker->IsConnected()) {
		return; // Nothing to enqueue without an established Redis connection.
	}

	{
		std::lock_guard lock(m_PendingItemsMutex);
		m_PendingItems.emplace(icingadb::task_queue::PendingDependencyGroupStateItem{depGroup});
	}
	// Notify the worker outside the lock so it doesn't immediately block on the mutex.
	m_PendingItemsCV.notify_one();
}
|
||||
|
||||
/**
|
||||
* Enqueue the registration of a dependency child to a dependency group.
|
||||
*
|
||||
* This function adds a pending item to the queue for processing the registration of a child checkable
|
||||
* to a dependency group. If there is no active Redis connection available, this function is a no-op.
|
||||
*
|
||||
* @param depGroup The dependency group to which the child is being registered.
|
||||
* @param child The child checkable being registered to the dependency group.
|
||||
*/
|
||||
void IcingaDB::EnqueueDependencyChildRegistered(const DependencyGroup::Ptr& depGroup, const Checkable::Ptr& child)
|
||||
{
|
||||
if (GetActive() && m_RconWorker && m_RconWorker->IsConnected()) {
|
||||
{
|
||||
std::lock_guard lock(m_PendingItemsMutex);
|
||||
m_PendingItems.emplace(icingadb::task_queue::PendingDependencyEdgeItem{depGroup, child});
|
||||
}
|
||||
m_PendingItemsCV.notify_one();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Enqueue the removal of a dependency child from a dependency group.
|
||||
*
|
||||
* This function handles the removal of a child checkable from a dependency group by first checking if there
|
||||
* are any pending registration items for the same child and dependency group. If such an item exists, it is
|
||||
* removed from the pending items queue, effectively canceling the registration. If there is also a pending
|
||||
* dependency group state update triggered by the same child, it is either removed or updated to use a different
|
||||
* child if the group is not being removed entirely. If no pending registration exists, the function proceeds
|
||||
* to enqueue the necessary deletions in Redis for the dependencies and related nodes and edges.
|
||||
*
|
||||
* @param depGroup The dependency group from which the child is being removed.
|
||||
* @param dependencies The list of dependencies associated with the child being removed.
|
||||
* @param removeGroup A flag indicating whether the entire dependency group should be removed.
|
||||
*/
|
||||
void IcingaDB::EnqueueDependencyChildRemoved(
|
||||
const DependencyGroup::Ptr& depGroup,
|
||||
const std::vector<Dependency::Ptr>& dependencies,
|
||||
bool removeGroup
|
||||
)
|
||||
{
|
||||
namespace queue = icingadb::task_queue;
|
||||
|
||||
if (dependencies.empty() || !GetActive() || !m_RconWorker || !m_RconWorker->IsConnected()) {
|
||||
return; // No need to enqueue anything if we're not connected or there are no dependencies.
|
||||
}
|
||||
|
||||
Checkable::Ptr child(dependencies.front()->GetChild());
|
||||
bool cancelledRegistration = false;
|
||||
|
||||
{
|
||||
std::lock_guard lock(m_PendingItemsMutex);
|
||||
if (m_PendingItems.erase(std::make_pair(depGroup.get(), child.get())) > 0) {
|
||||
cancelledRegistration = true;
|
||||
if (removeGroup) {
|
||||
// If we're removing the entire group registration, we can also drop any pending dependency group
|
||||
// state update triggered previously as it should no longer have any children left.
|
||||
m_PendingItems.erase(depGroup.get());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!child->HasAnyDependencies()) {
|
||||
// If the child Checkable has no parent and reverse dependencies, we can safely remove the dependency node.
|
||||
// This might be a no-op in some cases (e.g. if the child's only dependency was the one that we just canceled
|
||||
// above), but since we can't reliably determine whether the node exists in Redis or not, we just enqueue the
|
||||
// deletion anyway.
|
||||
EnqueueRelationsDeletion(GetObjectIdentifier(child), {{CONFIG_REDIS_KEY_PREFIX "dependency:node", ""}});
|
||||
}
|
||||
|
||||
if (cancelledRegistration && depGroup->GetIcingaDBIdentifier().IsEmpty()) {
|
||||
// If we had a pending registration that we just canceled above, and the dependency group has no
|
||||
// IcingaDB identifier yet, then there's no need to proceed with any deletions, as the dependency
|
||||
// group was never serialized to Redis in the first place.
|
||||
return;
|
||||
}
|
||||
|
||||
if (depGroup->GetIcingaDBIdentifier().IsEmpty()) {
|
||||
// An empty IcingaDB identifier indicates that the worker thread has just picked up the registration of the
|
||||
// first child (removed from the pending items queue) but hasn't yet entered the InsertCheckableDependencies()
|
||||
// function to actually fill in the IcingaDB identifier. Thus, we need to generate and set it here to ensure
|
||||
// that the relation deletions below use the correct identifier.
|
||||
if (depGroup->IsRedundancyGroup()) {
|
||||
// Keep this with IcingaDB::InsertCheckableDependencies in sync!
|
||||
depGroup->SetIcingaDBIdentifier(HashValue(new Array{m_EnvironmentId, depGroup->GetCompositeKey()}));
|
||||
} else {
|
||||
// This will set the IcingaDB identifier of the dependency group as a side effect.
|
||||
(void)GetDependencyEdgeStateId(depGroup, dependencies.front());
|
||||
}
|
||||
}
|
||||
|
||||
std::set<Checkable*> detachedParents;
|
||||
for (const auto& dependency : dependencies) {
|
||||
const auto& parent(dependency->GetParent());
|
||||
if (auto [_, inserted] = detachedParents.insert(dependency->GetParent().get()); inserted) {
|
||||
String edgeId;
|
||||
if (depGroup->IsRedundancyGroup()) {
|
||||
// If the redundancy group has no members left, it's going to be removed as well, so we need to
|
||||
// delete dependency edges from that group to the parent Checkables.
|
||||
if (removeGroup) {
|
||||
EnqueueRelationsDeletion(
|
||||
GetDependencyEdgeStateId(depGroup, dependency),
|
||||
{
|
||||
{CONFIG_REDIS_KEY_PREFIX "dependency:edge", ""},
|
||||
{CONFIG_REDIS_KEY_PREFIX "dependency:edge:state", ""},
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
// Remove the connection from the child Checkable to the redundancy group.
|
||||
edgeId = HashValue(new Array{GetObjectIdentifier(child), depGroup->GetIcingaDBIdentifier()});
|
||||
} else {
|
||||
// Remove the edge between the parent and child Checkable linked through the removed dependency.
|
||||
edgeId = HashValue(new Array{GetObjectIdentifier(child), GetObjectIdentifier(parent)});
|
||||
}
|
||||
|
||||
EnqueueRelationsDeletion(edgeId, {{CONFIG_REDIS_KEY_PREFIX "dependency:edge", ""}});
|
||||
|
||||
// The total_children and affects_children columns might now have different outcome, so update the parent
|
||||
// Checkable as well. The grandparent Checkable may still have wrong numbers of total children, though it's
|
||||
// not worth traversing the whole tree way up and sending config updates for each one of them, as the next
|
||||
// Redis config dump is going to fix it anyway.
|
||||
EnqueueConfigObject(parent, queue::ConfigUpdate);
|
||||
|
||||
if (!parent->HasAnyDependencies()) {
|
||||
// If the parent Checkable isn't part of any other dependency chain anymore, drop its dependency node entry.
|
||||
EnqueueRelationsDeletion(GetObjectIdentifier(parent), {{CONFIG_REDIS_KEY_PREFIX "dependency:node", ""}});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (removeGroup && depGroup->IsRedundancyGroup()) {
|
||||
EnqueueRelationsDeletion(
|
||||
depGroup->GetIcingaDBIdentifier(),
|
||||
{
|
||||
{CONFIG_REDIS_KEY_PREFIX "dependency:node", ""},
|
||||
{CONFIG_REDIS_KEY_PREFIX "redundancygroup", ""},
|
||||
{CONFIG_REDIS_KEY_PREFIX "redundancygroup:state", ""},
|
||||
{CONFIG_REDIS_KEY_PREFIX "dependency:edge:state", ""}
|
||||
}
|
||||
);
|
||||
} else if (removeGroup) {
|
||||
// Note: The Icinga DB identifier of a non-redundant dependency group is used as the edge state ID
|
||||
// and shared by all of its dependency objects. See also SerializeDependencyEdgeState() for details.
|
||||
EnqueueRelationsDeletion(depGroup->GetIcingaDBIdentifier(), {{CONFIG_REDIS_KEY_PREFIX "dependency:edge:state", ""}});
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Enqueue a relation deletion for processing in the pending objects thread.
 *
 * This function adds a relation deletion item to the set of pending items to be processed by the
 * pending items worker thread. The relation deletion item contains the ID of the relation to be
 * deleted and a map of Redis keys from which to delete the relation. If the relation deletion item
 * is already in the set, it updates the deletion keys accordingly.
 *
 * @param id The ID of the relation to be deleted.
 * @param relations A map of Redis keys from which to delete the relation. Taken by value so the
 *                  keys can be moved into an already queued item when merging.
 */
void IcingaDB::EnqueueRelationsDeletion(const String& id, icingadb::task_queue::RelationsDeletionItem::RelationsKeySet relations)
{
	namespace queue = icingadb::task_queue;

	if (!GetActive() || !m_RconWorker || !m_RconWorker->IsConnected()) {
		return; // No need to enqueue anything if we're not connected.
	}

	{
		std::lock_guard lock(m_PendingItemsMutex);
		// The first index of m_PendingItems is unique per relation ID, so emplace() fails if a
		// deletion for this ID is already pending.
		if (auto [it, inserted] = m_PendingItems.emplace(queue::RelationsDeletionItem{id, relations}); !inserted) {
			// Merge the additional Redis keys into the already queued item instead of queueing a
			// second one; merge() moves the elements out of our local set.
			m_PendingItems.modify(it, [&relations](queue::PendingQueueItem& val) {
				auto& item = std::get<queue::RelationsDeletionItem>(val.Item);
				item.Relations.merge(std::move(relations));
			});
		}
	}
	// Wake up the worker thread outside the lock so it doesn't immediately block on the mutex.
	m_PendingItemsCV.notify_one();
}
|
||||
|
|
@ -23,8 +23,6 @@ using namespace icinga;
|
|||
|
||||
#define MAX_EVENTS_DEFAULT 5000
|
||||
|
||||
using Prio = RedisConnection::QueryPriority;
|
||||
|
||||
String IcingaDB::m_EnvironmentId;
|
||||
std::mutex IcingaDB::m_EnvironmentIdInitMutex;
|
||||
|
||||
|
|
@ -85,6 +83,8 @@ void IcingaDB::Start(bool runtimeCreated)
|
|||
m_Rcon = new RedisConnection(connInfo);
|
||||
m_RconLocked.store(m_Rcon);
|
||||
|
||||
m_RconWorker = new RedisConnection(connInfo, m_Rcon);
|
||||
|
||||
for (const auto& [type, _] : GetSyncableTypes()) {
|
||||
auto ctype (dynamic_cast<ConfigType*>(type.get()));
|
||||
if (!ctype)
|
||||
|
|
@ -110,6 +110,7 @@ void IcingaDB::Start(bool runtimeCreated)
|
|||
m_Rcon->SetConnectedCallback([this](boost::asio::yield_context&) {
|
||||
m_Rcon->SetConnectedCallback(nullptr);
|
||||
|
||||
m_RconWorker->Start();
|
||||
for (auto& kv : m_Rcons) {
|
||||
kv.second->Start();
|
||||
}
|
||||
|
|
@ -123,12 +124,10 @@ void IcingaDB::Start(bool runtimeCreated)
|
|||
|
||||
m_WorkQueue.SetName("IcingaDB");
|
||||
|
||||
m_Rcon->SuppressQueryKind(Prio::CheckResult);
|
||||
m_Rcon->SuppressQueryKind(Prio::RuntimeStateSync);
|
||||
|
||||
Ptr keepAlive (this);
|
||||
|
||||
m_HistoryThread = std::async(std::launch::async, [this, keepAlive]() { ForwardHistoryEntries(); });
|
||||
m_PendingItemsThread = std::thread([this, keepAlive] { PendingItemsThreadProc(); });
|
||||
}
|
||||
|
||||
void IcingaDB::ExceptionHandler(boost::exception_ptr exp)
|
||||
|
|
@ -152,9 +151,11 @@ void IcingaDB::OnConnectedHandler()
|
|||
|
||||
UpdateAllConfigObjects();
|
||||
|
||||
m_ConfigDumpDone = true;
|
||||
|
||||
m_ConfigDumpDone.store(true);
|
||||
m_ConfigDumpInProgress = false;
|
||||
// Notify the pending items worker to let it know that the config dump is done,
|
||||
// and it can start processing pending items.
|
||||
m_PendingItemsCV.notify_one();
|
||||
}
|
||||
|
||||
void IcingaDB::PublishStatsTimerHandler(void)
|
||||
|
|
@ -182,7 +183,7 @@ void IcingaDB::PublishStats()
|
|||
}
|
||||
}
|
||||
|
||||
m_Rcon->FireAndForgetQuery(std::move(query), Prio::Heartbeat);
|
||||
m_Rcon->FireAndForgetQuery(std::move(query), {}, true /* high priority */);
|
||||
}
|
||||
|
||||
void IcingaDB::Stop(bool runtimeRemoved)
|
||||
|
|
@ -190,6 +191,8 @@ void IcingaDB::Stop(bool runtimeRemoved)
|
|||
Log(LogInformation, "IcingaDB")
|
||||
<< "Flushing history data buffer to Redis.";
|
||||
|
||||
m_PendingItemsCV.notify_all(); // Wake up the pending items worker to let it exit cleanly.
|
||||
|
||||
if (m_HistoryThread.wait_for(std::chrono::minutes(1)) == std::future_status::timeout) {
|
||||
Log(LogCritical, "IcingaDB")
|
||||
<< "Flushing takes more than one minute (while we're about to shut down). Giving up and discarding "
|
||||
|
|
@ -197,6 +200,7 @@ void IcingaDB::Stop(bool runtimeRemoved)
|
|||
}
|
||||
|
||||
m_StatsTimer->Stop(true);
|
||||
m_PendingItemsThread.join();
|
||||
|
||||
Log(LogInformation, "IcingaDB")
|
||||
<< "'" << GetName() << "' stopped.";
|
||||
|
|
|
|||
|
|
@ -16,6 +16,9 @@
|
|||
#include "icinga/service.hpp"
|
||||
#include "icinga/downtime.hpp"
|
||||
#include "remote/messageorigin.hpp"
|
||||
#include <boost/multi_index_container.hpp>
|
||||
#include <boost/multi_index/ordered_index.hpp>
|
||||
#include <boost/multi_index/sequenced_index.hpp>
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <future>
|
||||
|
|
@ -32,6 +35,189 @@ namespace icinga
|
|||
#define CONFIG_REDIS_KEY_PREFIX "icinga:"
|
||||
#define CHECKSUM_REDIS_KEY_PREFIX CONFIG_REDIS_KEY_PREFIX "checksum:"
|
||||
|
||||
namespace icingadb::task_queue
|
||||
{
|
||||
|
||||
/**
|
||||
* Dirty bits for config/state changes.
|
||||
*
|
||||
* These are used to mark objects as "dirty" in order to trigger appropriate updates in Redis.
|
||||
* Each bit represents a different type of change that requires a specific action to be taken.
|
||||
*
|
||||
* @ingroup icingadb
|
||||
*/
|
||||
enum DirtyBits : uint32_t
|
||||
{
|
||||
ConfigUpdate = 1<<0, // Trigger a Redis config update for the object.
|
||||
ConfigDelete = 1<<1, // Send a deletion command for the object to Redis.
|
||||
VolatileState = 1<<2, // Send a volatile state update to Redis (affects only checkables).
|
||||
RuntimeState = 1<<3, // Send a runtime state update to Redis (affects only checkables).
|
||||
NextUpdate = 1<<4, // Update the `icinga:nextupdate:{host,service}` Redis keys (affects only checkables).
|
||||
|
||||
FullState = VolatileState | RuntimeState, // A combination of all (non-dependency) state-related dirty bits.
|
||||
|
||||
// All valid dirty bits combined used for masking input values.
|
||||
DirtyBitsAll = ConfigUpdate | ConfigDelete | FullState | NextUpdate
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* A pending configuration object item.
|
||||
*
|
||||
* This struct represents a pending item in the queue that is associated with a configuration object.
|
||||
* It contains a pointer to the configuration object and the dirty bits indicating the type of updates
|
||||
* required for that object in Redis.
|
||||
*
|
||||
* @ingroup icingadb
|
||||
*/
|
||||
struct PendingConfigItem
|
||||
{
|
||||
ConfigObject::Ptr Object;
|
||||
uint32_t DirtyBits;
|
||||
|
||||
PendingConfigItem(const ConfigObject::Ptr& obj, uint32_t bits);
|
||||
|
||||
[[nodiscard]] ConfigObject* GetQueueLookupKey() const
|
||||
{
|
||||
return Object.get();
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* A pending dependency group state item.
|
||||
*
|
||||
* This struct represents a pending item in the queue that is associated with a dependency group.
|
||||
* It contains a pointer to the dependency group for which state updates are required.
|
||||
*
|
||||
* @ingroup icingadb
|
||||
*/
|
||||
struct PendingDependencyGroupStateItem
|
||||
{
|
||||
DependencyGroup::Ptr DepGroup;
|
||||
|
||||
explicit PendingDependencyGroupStateItem(const DependencyGroup::Ptr& depGroup);
|
||||
|
||||
[[nodiscard]] DependencyGroup* GetQueueLookupKey() const
|
||||
{
|
||||
return DepGroup.get();
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* A pending dependency edge item.
|
||||
*
|
||||
* This struct represents a pending dependency child registration into a dependency group.
|
||||
* It contains a pointer to the dependency group and the checkable child being registered.
|
||||
*
|
||||
* @ingroup icingadb
|
||||
*/
|
||||
struct PendingDependencyEdgeItem
|
||||
{
|
||||
DependencyGroup::Ptr DepGroup;
|
||||
Checkable::Ptr Child;
|
||||
|
||||
PendingDependencyEdgeItem(const DependencyGroup::Ptr& depGroup, const Checkable::Ptr& child);
|
||||
|
||||
[[nodiscard]] std::pair<DependencyGroup*, Checkable*> GetQueueLookupKey() const
|
||||
{
|
||||
return {DepGroup.get(), Child.get()};
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* A pending relations deletion item.
|
||||
*
|
||||
* This struct represents a pending item in the queue that is associated with the deletion of relations
|
||||
* in Redis. It contains a map of Redis keys from which the relation identified by the @c ID field should
|
||||
* be deleted. The @c ID field represents the unique identifier of the relation to be deleted, and the
|
||||
* @c Relations map specifies the Redis keys and whether to delete the corresponding checksum keys.
|
||||
*
|
||||
* @ingroup icingadb
|
||||
*/
|
||||
struct RelationsDeletionItem
|
||||
{
|
||||
std::string ID;
|
||||
// Set of Redis keys from which to delete a relation, along with their checksums (if any).
|
||||
using RelationsKeySet = std::set<std::pair<RedisConnection::QueryArg, RedisConnection::QueryArg>>;
|
||||
RelationsKeySet Relations;
|
||||
|
||||
RelationsDeletionItem(const String& id, const RelationsKeySet& relations);
|
||||
|
||||
[[nodiscard]] std::string_view GetQueueLookupKey() const
|
||||
{
|
||||
return ID;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* A pending queue item.
|
||||
*
|
||||
* This struct represents a generic pending item in the queue. The @c EnqueueTime field records the
|
||||
* time when the item was added to the queue, which can be useful for tracking how long an item waits before
|
||||
* being processed. This base struct wraps one of the available more specific pending item types that operate
|
||||
* on different kinds of objects, such as configuration objects or dependency groups.
|
||||
*
|
||||
* @ingroup icingadb
|
||||
*/
|
||||
struct PendingQueueItem
|
||||
{
|
||||
using ItemVariant = std::variant<
|
||||
PendingConfigItem,
|
||||
PendingDependencyGroupStateItem,
|
||||
PendingDependencyEdgeItem,
|
||||
RelationsDeletionItem
|
||||
>;
|
||||
|
||||
ItemVariant Item;
|
||||
std::chrono::steady_clock::time_point EnqueueTime{std::chrono::steady_clock::now()};
|
||||
|
||||
template<typename T,
|
||||
// don't hide the default copy and move constructors
|
||||
typename = std::enable_if_t<!std::is_same_v<std::decay_t<T>, PendingQueueItem>>
|
||||
>
|
||||
explicit PendingQueueItem(T&& item) : Item(std::forward<T>(item)) {}
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
struct KeyExtractor /* not implemented, only the specialization for std::variant is used */;
|
||||
|
||||
/**
|
||||
* Helper to use @c PendingQueueItem in @c boost::multi_index_container.
|
||||
*
|
||||
* It implements a key extractor that @c multi_index_container will invoke for each element to create an index.
|
||||
* Every individual type in @c PendingQueueItem::ItemVariant implements a method @c GetQueueLookupKey() that will
|
||||
* be invoked by this key extractor. The return value from the individual type is then returned as part of a second
|
||||
* variant type (its element types are automatically deduced from the individual @c GetQueueLookupKey() methods).
|
||||
* @c multi_index_container will then just use the standard comparison operators on it for building the index.
|
||||
*
|
||||
* @ingroup icingadb
|
||||
*/
|
||||
template<typename ...Ts>
|
||||
struct KeyExtractor<std::variant<Ts...>> {
|
||||
using result_type = std::variant<decltype(std::declval<Ts>().GetQueueLookupKey())...>;
|
||||
|
||||
result_type operator()(const PendingQueueItem& queueItem) const
|
||||
{
|
||||
return std::visit([](const auto& innerItem) -> result_type {
|
||||
return innerItem.GetQueueLookupKey();
|
||||
}, queueItem.Item);
|
||||
};
|
||||
};
|
||||
|
||||
// A multi-index container for managing pending items with unique IDs and maintaining insertion order.
|
||||
// The first index is an ordered unique index based on the pending item key, allowing for efficient
|
||||
// lookups and ensuring uniqueness of items. The second index is a sequenced index that maintains the
|
||||
// order of insertion, enabling FIFO processing of pending items.
|
||||
using PendingItemsSet = boost::multi_index_container<
|
||||
PendingQueueItem,
|
||||
boost::multi_index::indexed_by<
|
||||
boost::multi_index::ordered_unique<KeyExtractor<PendingQueueItem::ItemVariant>>,
|
||||
boost::multi_index::sequenced<>
|
||||
>
|
||||
>;
|
||||
|
||||
} // namespace icingadb::task_queue
|
||||
|
||||
/**
|
||||
* @ingroup icingadb
|
||||
*/
|
||||
|
|
@ -51,7 +237,7 @@ public:
|
|||
|
||||
String GetEnvironmentId() const override;
|
||||
|
||||
inline RedisConnection::Ptr GetConnection()
|
||||
inline RedisConnection::Ptr GetConnection() const
|
||||
{
|
||||
return m_RconLocked.load();
|
||||
}
|
||||
|
|
@ -104,13 +290,6 @@ private:
|
|||
std::mutex m_Mutex;
|
||||
};
|
||||
|
||||
enum StateUpdate
|
||||
{
|
||||
Volatile = 1ull << 0,
|
||||
RuntimeOnly = 1ull << 1,
|
||||
Full = Volatile | RuntimeOnly,
|
||||
};
|
||||
|
||||
void OnConnectedHandler();
|
||||
|
||||
void PublishStatsTimerHandler();
|
||||
|
|
@ -123,10 +302,8 @@ private:
|
|||
std::vector<Dictionary::Ptr>* runtimeUpdates, const DependencyGroup::Ptr& onlyDependencyGroup = nullptr);
|
||||
void InsertObjectDependencies(const ConfigObject::Ptr& object,
|
||||
std::map<RedisConnection::QueryArg, RedisConnection::Query>& hMSets, std::vector<Dictionary::Ptr>& runtimeUpdates, bool runtimeUpdate);
|
||||
void UpdateDependenciesState(const Checkable::Ptr& checkable, const DependencyGroup::Ptr& onlyDependencyGroup = nullptr,
|
||||
std::set<DependencyGroup*>* seenGroups = nullptr) const;
|
||||
void UpdateState(const Checkable::Ptr& checkable, StateUpdate mode);
|
||||
void SendConfigUpdate(const ConfigObject::Ptr& object, bool runtimeUpdate);
|
||||
void UpdateDependenciesState(const Checkable::Ptr& checkable, const DependencyGroup::Ptr& dependencyGroup) const;
|
||||
void UpdateState(const Checkable::Ptr& checkable, uint32_t mode);
|
||||
void CreateConfigUpdate(const ConfigObject::Ptr& object, const QueryArgPair& redisKeyPair,
|
||||
std::map<RedisConnection::QueryArg, RedisConnection::Query>& hMSets, std::vector<Dictionary::Ptr>& runtimeUpdates, bool runtimeUpdate);
|
||||
void SendConfigDelete(const ConfigObject::Ptr& object);
|
||||
|
|
@ -146,7 +323,7 @@ private:
|
|||
void SendAddedComment(const Comment::Ptr& comment);
|
||||
void SendRemovedComment(const Comment::Ptr& comment);
|
||||
void SendFlappingChange(const Checkable::Ptr& checkable, double changeTime, double flappingLastChange);
|
||||
void SendNextUpdate(const Checkable::Ptr& checkable);
|
||||
void SendNextUpdate(const Checkable::Ptr& checkable) const;
|
||||
void SendAcknowledgementSet(const Checkable::Ptr& checkable, const String& author, const String& comment, AcknowledgementType type, bool persistent, double changeTime, double expiry);
|
||||
void SendAcknowledgementCleared(const Checkable::Ptr& checkable, const String& removedBy, double changeTime, double ackLastChange);
|
||||
void SendNotificationUsersChanged(const Notification::Ptr& notification, const Array::Ptr& oldValues, const Array::Ptr& newValues);
|
||||
|
|
@ -159,21 +336,19 @@ private:
|
|||
void SendCommandEnvChanged(const ConfigObject::Ptr& command, const CmdArgEnvRedisKeys& cmdRedisKeys, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues);
|
||||
void SendCommandArgumentsChanged(const ConfigObject::Ptr& command, const CmdArgEnvRedisKeys& cmdRedisKeys, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues);
|
||||
void SendCustomVarsChanged(const ConfigObject::Ptr& object, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues);
|
||||
void SendDependencyGroupChildRegistered(const Checkable::Ptr& child, const DependencyGroup::Ptr& dependencyGroup);
|
||||
void SendDependencyGroupChildRemoved(const DependencyGroup::Ptr& dependencyGroup, const std::vector<Dependency::Ptr>& dependencies, bool removeGroup);
|
||||
|
||||
void ForwardHistoryEntries();
|
||||
|
||||
std::vector<String> UpdateObjectAttrs(const ConfigObject::Ptr& object, int fieldType, const String& typeNameOverride);
|
||||
Dictionary::Ptr SerializeState(const Checkable::Ptr& checkable);
|
||||
|
||||
/* Stats */
|
||||
static Dictionary::Ptr GetStats();
|
||||
|
||||
/* utilities */
|
||||
static void DeleteKeys(const RedisConnection::Ptr& conn, const std::vector<RedisConnection::QueryArg>& keys, RedisConnection::QueryPriority priority);
|
||||
static void DeleteKeys(const RedisConnection::Ptr& conn, const std::vector<RedisConnection::QueryArg>& keys);
|
||||
static std::vector<RedisConnection::QueryArg> GetTypeOverwriteKeys(const Type::Ptr& type, bool skipChecksums = false);
|
||||
static void AddDataToHmSets(std::map<RedisConnection::QueryArg, RedisConnection::Query>& hMSets, const RedisConnection::QueryArg& redisKey, const String& id, const Dictionary::Ptr& data);
|
||||
static bool IsStateKey(const RedisConnection::QueryArg& key);
|
||||
static String FormatCheckSumBinary(const String& str);
|
||||
static String FormatCommandLine(const Value& commandLine);
|
||||
static QueryArgPair GetCheckableStateKeys(const Type::Ptr& type);
|
||||
|
|
@ -192,6 +367,7 @@ private:
|
|||
static Dictionary::Ptr SerializeVars(const Dictionary::Ptr& vars);
|
||||
static Dictionary::Ptr SerializeDependencyEdgeState(const DependencyGroup::Ptr& dependencyGroup, const Dependency::Ptr& dep);
|
||||
static Dictionary::Ptr SerializeRedundancyGroupState(const Checkable::Ptr& child, const DependencyGroup::Ptr& redundancyGroup);
|
||||
static String GetDependencyEdgeStateId(const DependencyGroup::Ptr& dependencyGroup, const Dependency::Ptr& dep);
|
||||
|
||||
static String HashValue(const Value& value);
|
||||
static String HashValue(const Value& value, const std::set<String>& propertiesBlacklist, bool propertiesWhitelist = false);
|
||||
|
|
@ -214,7 +390,7 @@ private:
|
|||
static void CommentRemovedHandler(const Comment::Ptr& comment);
|
||||
static void FlappingChangeHandler(const Checkable::Ptr& checkable, double changeTime);
|
||||
static void NewCheckResultHandler(const Checkable::Ptr& checkable);
|
||||
static void NextCheckUpdatedHandler(const Checkable::Ptr& checkable);
|
||||
static void NextCheckChangedHandler(const Checkable::Ptr& checkable);
|
||||
static void DependencyGroupChildRegisteredHandler(const Checkable::Ptr& child, const DependencyGroup::Ptr& dependencyGroup);
|
||||
static void DependencyGroupChildRemovedHandler(const DependencyGroup::Ptr& dependencyGroup, const std::vector<Dependency::Ptr>& dependencies, bool removeGroup);
|
||||
static void HostProblemChangedHandler(const Service::Ptr& service);
|
||||
|
|
@ -253,13 +429,31 @@ private:
|
|||
Bulker<RedisConnection::Query> m_HistoryBulker {4096, std::chrono::milliseconds(250)};
|
||||
|
||||
bool m_ConfigDumpInProgress;
|
||||
bool m_ConfigDumpDone;
|
||||
std::atomic_bool m_ConfigDumpDone{false};
|
||||
|
||||
/**
|
||||
* The primary Redis connection used to send history and heartbeat queries.
|
||||
*
|
||||
* This connection is used exclusively for sending history and heartbeat queries to Redis. It ensures that
|
||||
* history and heartbeat operations do not interfere with other Redis operations. Also, it is the leader for
|
||||
* all other Redis connections including @c m_RconWorker, and is the only source of truth for all IcingaDB Redis
|
||||
* related connection statistics.
|
||||
*
|
||||
* Note: This will still be shared with the icingadb check command, as that command also sends
|
||||
* only XREAD queries which are similar in nature to history/heartbeat queries.
|
||||
*/
|
||||
RedisConnection::Ptr m_Rcon;
|
||||
// m_RconLocked containes a copy of the value in m_Rcon where all accesses are guarded by a mutex to allow safe
|
||||
// concurrent access like from the icingadb check command. It's a copy to still allow fast access without additional
|
||||
// syncronization to m_Rcon within the IcingaDB feature itself.
|
||||
// m_RconLocked contains a copy of the value in m_Rcon where all accesses are guarded by a mutex to
|
||||
// allow safe concurrent access like from the icingadb check command. It's a copy to still allow fast access
|
||||
// without additional synchronization to m_Rcon within the IcingaDB feature itself.
|
||||
Locked<RedisConnection::Ptr> m_RconLocked;
|
||||
/**
|
||||
* A Redis connection for config and state updates.
|
||||
*
|
||||
* This connection is used for all non-history and non-heartbeat related queries to Redis.
|
||||
* It is a child of @c m_Rcon, meaning it forwards all its connection stats to @c m_Rcon as well.
|
||||
*/
|
||||
RedisConnection::Ptr m_RconWorker;
|
||||
std::unordered_map<ConfigType*, RedisConnection::Ptr> m_Rcons;
|
||||
std::atomic_size_t m_PendingRcons;
|
||||
|
||||
|
|
@ -272,6 +466,24 @@ private:
|
|||
// initialization, the value is read-only and can be accessed without further synchronization.
|
||||
static String m_EnvironmentId;
|
||||
static std::mutex m_EnvironmentIdInitMutex;
|
||||
|
||||
std::thread m_PendingItemsThread; // The background worker thread (consumer of m_PendingItems).
|
||||
icingadb::task_queue::PendingItemsSet m_PendingItems; // Container for pending items with dirty bits (access protected by m_PendingItemsMutex).
|
||||
std::mutex m_PendingItemsMutex; // Mutex to protect access to m_PendingItems.
|
||||
std::condition_variable m_PendingItemsCV; // Condition variable to forcefully wake up the worker thread.
|
||||
|
||||
void PendingItemsThreadProc();
|
||||
|
||||
void ProcessQueueItem(const icingadb::task_queue::PendingConfigItem& item);
|
||||
void ProcessQueueItem(const icingadb::task_queue::PendingDependencyGroupStateItem& item) const;
|
||||
void ProcessQueueItem(const icingadb::task_queue::PendingDependencyEdgeItem& item);
|
||||
void ProcessQueueItem(const icingadb::task_queue::RelationsDeletionItem& item);
|
||||
|
||||
void EnqueueConfigObject(const ConfigObject::Ptr& object, uint32_t bits);
|
||||
void EnqueueDependencyGroupStateUpdate(const DependencyGroup::Ptr& depGroup);
|
||||
void EnqueueDependencyChildRegistered(const DependencyGroup::Ptr& depGroup, const Checkable::Ptr& child);
|
||||
void EnqueueDependencyChildRemoved(const DependencyGroup::Ptr& depGroup, const std::vector<Dependency::Ptr>& dependencies, bool removeGroup);
|
||||
void EnqueueRelationsDeletion(const String& id, icingadb::task_queue::RelationsDeletionItem::RelationsKeySet relations);
|
||||
};
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -127,7 +127,8 @@ void IcingadbCheckTask::ScriptFunc(const Checkable::Ptr& checkable, const CheckR
|
|||
"0-0", "0-0", "0-0", "0-0", "0-0", "0-0",
|
||||
}
|
||||
},
|
||||
RedisConnection::QueryPriority::Heartbeat
|
||||
{},
|
||||
true /* high priority */
|
||||
));
|
||||
|
||||
redisTime = std::move(replies.at(0));
|
||||
|
|
|
|||
|
|
@ -13,7 +13,9 @@
|
|||
#include "base/utility.hpp"
|
||||
#include <boost/asio.hpp>
|
||||
#include <boost/coroutine/exceptions.hpp>
|
||||
#include <boost/container/static_vector.hpp>
|
||||
#include <boost/date_time/posix_time/posix_time_duration.hpp>
|
||||
#include <boost/range/algorithm/min_element.hpp>
|
||||
#include <exception>
|
||||
#include <future>
|
||||
#include <iterator>
|
||||
|
|
@ -112,12 +114,22 @@ void LogQuery(RedisConnection::Query& query, Log& msg)
|
|||
}
|
||||
|
||||
/**
|
||||
* Queue a Redis query for sending
|
||||
* Queue a Redis query for sending without waiting for the response in a fire-and-forget manner.
|
||||
*
|
||||
* @param query Redis query
|
||||
* @param priority The query's priority
|
||||
* If the highPriority flag is set to true, the query is treated with high priority and placed at the front of
|
||||
* the write queue, ensuring it is sent before other queued queries. This is useful for time-sensitive operations
|
||||
* that require to be executed promptly, which is the case for IcingaDB heartbeat queries. If there are already
|
||||
* queries with high priority in the queue, the new query is inserted after all existing high priority queries but
|
||||
* before any normal priority queries to maintain the order of high priority items.
|
||||
*
|
||||
* @note The highPriority flag should be used sparingly and only for critical queries, as it can affect the overall
|
||||
* performance and responsiveness of the Redis connection by potentially delaying other queued queries.
|
||||
*
|
||||
* @param query The Redis query to be sent.
|
||||
* @param affects Does the query affect config, state or history data.
|
||||
* @param highPriority Whether the query should be treated with high priority.
|
||||
*/
|
||||
void RedisConnection::FireAndForgetQuery(RedisConnection::Query query, RedisConnection::QueryPriority priority, QueryAffects affects)
|
||||
void RedisConnection::FireAndForgetQuery(Query query, QueryAffects affects, bool highPriority)
|
||||
{
|
||||
if (LogDebug >= Logger::GetMinLogSeverity()) {
|
||||
Log msg (LogDebug, "IcingaDB", "Firing and forgetting query:");
|
||||
|
|
@ -125,10 +137,9 @@ void RedisConnection::FireAndForgetQuery(RedisConnection::Query query, RedisConn
|
|||
}
|
||||
|
||||
auto item (Shared<Query>::Make(std::move(query)));
|
||||
auto ctime (Utility::GetTime());
|
||||
|
||||
asio::post(m_Strand, [this, item, priority, ctime, affects]() {
|
||||
m_Queues.Writes[priority].emplace(WriteQueueItem{item, nullptr, nullptr, nullptr, nullptr, ctime, affects});
|
||||
asio::post(m_Strand, [this, item, highPriority, affects, ctime = Utility::GetTime()]() {
|
||||
m_Queues.Push(WriteQueueItem{item, ctime, affects}, highPriority);
|
||||
m_QueuedWrites.Set();
|
||||
IncreasePendingQueries(1);
|
||||
});
|
||||
|
|
@ -138,9 +149,8 @@ void RedisConnection::FireAndForgetQuery(RedisConnection::Query query, RedisConn
|
|||
* Queue Redis queries for sending
|
||||
*
|
||||
* @param queries Redis queries
|
||||
* @param priority The queries' priority
|
||||
*/
|
||||
void RedisConnection::FireAndForgetQueries(RedisConnection::Queries queries, RedisConnection::QueryPriority priority, QueryAffects affects)
|
||||
void RedisConnection::FireAndForgetQueries(RedisConnection::Queries queries, QueryAffects affects)
|
||||
{
|
||||
if (LogDebug >= Logger::GetMinLogSeverity()) {
|
||||
for (auto& query : queries) {
|
||||
|
|
@ -150,10 +160,9 @@ void RedisConnection::FireAndForgetQueries(RedisConnection::Queries queries, Red
|
|||
}
|
||||
|
||||
auto item (Shared<Queries>::Make(std::move(queries)));
|
||||
auto ctime (Utility::GetTime());
|
||||
|
||||
asio::post(m_Strand, [this, item, priority, ctime, affects]() {
|
||||
m_Queues.Writes[priority].emplace(WriteQueueItem{nullptr, item, nullptr, nullptr, nullptr, ctime, affects});
|
||||
asio::post(m_Strand, [this, item, affects, ctime = Utility::GetTime()]() {
|
||||
m_Queues.Push(WriteQueueItem{item, ctime, affects}, false);
|
||||
m_QueuedWrites.Set();
|
||||
IncreasePendingQueries(item->size());
|
||||
});
|
||||
|
|
@ -163,11 +172,10 @@ void RedisConnection::FireAndForgetQueries(RedisConnection::Queries queries, Red
|
|||
* Queue a Redis query for sending, wait for the response and return (or throw) it
|
||||
*
|
||||
* @param query Redis query
|
||||
* @param priority The query's priority
|
||||
*
|
||||
* @return The response
|
||||
*/
|
||||
RedisConnection::Reply RedisConnection::GetResultOfQuery(RedisConnection::Query query, RedisConnection::QueryPriority priority, QueryAffects affects)
|
||||
RedisConnection::Reply RedisConnection::GetResultOfQuery(RedisConnection::Query query, QueryAffects affects)
|
||||
{
|
||||
if (LogDebug >= Logger::GetMinLogSeverity()) {
|
||||
Log msg (LogDebug, "IcingaDB", "Executing query:");
|
||||
|
|
@ -177,10 +185,9 @@ RedisConnection::Reply RedisConnection::GetResultOfQuery(RedisConnection::Query
|
|||
std::promise<Reply> promise;
|
||||
auto future (promise.get_future());
|
||||
auto item (Shared<std::pair<Query, std::promise<Reply>>>::Make(std::move(query), std::move(promise)));
|
||||
auto ctime (Utility::GetTime());
|
||||
|
||||
asio::post(m_Strand, [this, item, priority, ctime, affects]() {
|
||||
m_Queues.Writes[priority].emplace(WriteQueueItem{nullptr, nullptr, item, nullptr, nullptr, ctime, affects});
|
||||
asio::post(m_Strand, [this, item, affects, ctime = Utility::GetTime()]() {
|
||||
m_Queues.Push(WriteQueueItem{item, ctime, affects}, false);
|
||||
m_QueuedWrites.Set();
|
||||
IncreasePendingQueries(1);
|
||||
});
|
||||
|
|
@ -194,11 +201,10 @@ RedisConnection::Reply RedisConnection::GetResultOfQuery(RedisConnection::Query
|
|||
* Queue Redis queries for sending, wait for the responses and return (or throw) them
|
||||
*
|
||||
* @param queries Redis queries
|
||||
* @param priority The queries' priority
|
||||
*
|
||||
* @return The responses
|
||||
*/
|
||||
RedisConnection::Replies RedisConnection::GetResultsOfQueries(RedisConnection::Queries queries, RedisConnection::QueryPriority priority, QueryAffects affects)
|
||||
RedisConnection::Replies RedisConnection::GetResultsOfQueries(Queries queries, QueryAffects affects, bool highPriority)
|
||||
{
|
||||
if (LogDebug >= Logger::GetMinLogSeverity()) {
|
||||
for (auto& query : queries) {
|
||||
|
|
@ -210,10 +216,9 @@ RedisConnection::Replies RedisConnection::GetResultsOfQueries(RedisConnection::Q
|
|||
std::promise<Replies> promise;
|
||||
auto future (promise.get_future());
|
||||
auto item (Shared<std::pair<Queries, std::promise<Replies>>>::Make(std::move(queries), std::move(promise)));
|
||||
auto ctime (Utility::GetTime());
|
||||
|
||||
asio::post(m_Strand, [this, item, priority, ctime, affects]() {
|
||||
m_Queues.Writes[priority].emplace(WriteQueueItem{nullptr, nullptr, nullptr, item, nullptr, ctime, affects});
|
||||
asio::post(m_Strand, [this, item, highPriority, affects, ctime = Utility::GetTime()]() {
|
||||
m_Queues.Push(WriteQueueItem{item, ctime, affects}, highPriority);
|
||||
m_QueuedWrites.Set();
|
||||
IncreasePendingQueries(item->first.size());
|
||||
});
|
||||
|
|
@ -223,12 +228,10 @@ RedisConnection::Replies RedisConnection::GetResultsOfQueries(RedisConnection::Q
|
|||
return future.get();
|
||||
}
|
||||
|
||||
void RedisConnection::EnqueueCallback(const std::function<void(boost::asio::yield_context&)>& callback, RedisConnection::QueryPriority priority)
|
||||
void RedisConnection::EnqueueCallback(const std::function<void(boost::asio::yield_context&)>& callback)
|
||||
{
|
||||
auto ctime (Utility::GetTime());
|
||||
|
||||
asio::post(m_Strand, [this, callback, priority, ctime]() {
|
||||
m_Queues.Writes[priority].emplace(WriteQueueItem{nullptr, nullptr, nullptr, nullptr, callback, ctime, QueryAffects{}});
|
||||
asio::post(m_Strand, [this, callback, ctime = Utility::GetTime()]() {
|
||||
m_Queues.Push(WriteQueueItem{callback, ctime, {}}, false);
|
||||
m_QueuedWrites.Set();
|
||||
});
|
||||
}
|
||||
|
|
@ -241,7 +244,7 @@ void RedisConnection::EnqueueCallback(const std::function<void(boost::asio::yiel
|
|||
*/
|
||||
void RedisConnection::Sync()
|
||||
{
|
||||
GetResultOfQuery({"PING"}, RedisConnection::QueryPriority::SyncConnection);
|
||||
GetResultOfQuery({"PING"});
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -249,54 +252,32 @@ void RedisConnection::Sync()
|
|||
*
|
||||
* @return *nix timestamp or 0
|
||||
*/
|
||||
double RedisConnection::GetOldestPendingQueryTs()
|
||||
double RedisConnection::GetOldestPendingQueryTs() const
|
||||
{
|
||||
auto promise (Shared<std::promise<double>>::Make());
|
||||
auto future (promise->get_future());
|
||||
|
||||
asio::post(m_Strand, [this, promise]() {
|
||||
double oldest = 0;
|
||||
boost::container::static_vector<double, 2> timestamps;
|
||||
|
||||
for (auto& queue : m_Queues.Writes) {
|
||||
if (m_SuppressedQueryKinds.find(queue.first) == m_SuppressedQueryKinds.end() && !queue.second.empty()) {
|
||||
auto ctime (queue.second.front().CTime);
|
||||
|
||||
if (ctime < oldest || oldest == 0) {
|
||||
oldest = ctime;
|
||||
}
|
||||
}
|
||||
if (!m_Queues.HighWriteQ.empty()) {
|
||||
timestamps.push_back(m_Queues.HighWriteQ.front().CTime);
|
||||
}
|
||||
if (!m_Queues.NormalWriteQ.empty()) {
|
||||
timestamps.push_back(m_Queues.NormalWriteQ.front().CTime);
|
||||
}
|
||||
|
||||
promise->set_value(oldest);
|
||||
if (timestamps.empty()) {
|
||||
timestamps.push_back(0);
|
||||
}
|
||||
|
||||
promise->set_value(*boost::range::min_element(timestamps));
|
||||
});
|
||||
|
||||
future.wait();
|
||||
return future.get();
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark kind as kind of queries not to actually send yet
|
||||
*
|
||||
* @param kind Query kind
|
||||
*/
|
||||
void RedisConnection::SuppressQueryKind(RedisConnection::QueryPriority kind)
|
||||
{
|
||||
asio::post(m_Strand, [this, kind]() { m_SuppressedQueryKinds.emplace(kind); });
|
||||
}
|
||||
|
||||
/**
|
||||
* Unmark kind as kind of queries not to actually send yet
|
||||
*
|
||||
* @param kind Query kind
|
||||
*/
|
||||
void RedisConnection::UnsuppressQueryKind(RedisConnection::QueryPriority kind)
|
||||
{
|
||||
asio::post(m_Strand, [this, kind]() {
|
||||
m_SuppressedQueryKinds.erase(kind);
|
||||
m_QueuedWrites.Set();
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to connect to Redis
|
||||
*/
|
||||
|
|
@ -472,18 +453,16 @@ void RedisConnection::WriteLoop(asio::yield_context& yc)
|
|||
for (;;) {
|
||||
m_QueuedWrites.Wait(yc);
|
||||
|
||||
WriteFirstOfHighestPrio:
|
||||
for (auto& queue : m_Queues.Writes) {
|
||||
if (m_SuppressedQueryKinds.find(queue.first) != m_SuppressedQueryKinds.end() || queue.second.empty()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
auto next (std::move(queue.second.front()));
|
||||
queue.second.pop();
|
||||
|
||||
WriteItem(yc, std::move(next));
|
||||
|
||||
goto WriteFirstOfHighestPrio;
|
||||
while (m_Queues.HasWrites()) {
|
||||
auto queuedWrite(m_Queues.PopFront());
|
||||
std::visit(
|
||||
[this, &yc, &queuedWrite](const auto& item) {
|
||||
if (WriteItem(item, yc)) {
|
||||
RecordAffected(queuedWrite.Affects, Utility::GetTime());
|
||||
}
|
||||
},
|
||||
queuedWrite.Item
|
||||
);
|
||||
}
|
||||
|
||||
m_QueuedWrites.Clear();
|
||||
|
|
@ -526,111 +505,138 @@ void RedisConnection::LogStats(asio::yield_context& yc)
|
|||
}
|
||||
|
||||
/**
|
||||
* Send next and schedule receiving the response
|
||||
* Write a single Redis query in a fire-and-forget manner.
|
||||
*
|
||||
* @param next Redis queries
|
||||
* @param item Redis query
|
||||
*
|
||||
* @return true on success, false on failure.
|
||||
*/
|
||||
void RedisConnection::WriteItem(boost::asio::yield_context& yc, RedisConnection::WriteQueueItem next)
|
||||
bool RedisConnection::WriteItem(const FireAndForgetQ& item, boost::asio::yield_context& yc)
|
||||
{
|
||||
if (next.FireAndForgetQuery) {
|
||||
auto& item (*next.FireAndForgetQuery);
|
||||
DecreasePendingQueries(1);
|
||||
DecreasePendingQueries(1);
|
||||
|
||||
try {
|
||||
WriteOne(item, yc);
|
||||
} catch (const std::exception& ex) {
|
||||
Log msg (LogCritical, "IcingaDB", "Error during sending query");
|
||||
LogQuery(item, msg);
|
||||
msg << " which has been fired and forgotten: " << ex.what();
|
||||
try {
|
||||
WriteOne(*item, yc);
|
||||
} catch (const std::exception& ex) {
|
||||
Log msg (LogCritical, "IcingaDB", "Error during sending query");
|
||||
LogQuery(*item, msg);
|
||||
msg << " which has been fired and forgotten: " << ex.what();
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
if (m_Queues.FutureResponseActions.empty() || m_Queues.FutureResponseActions.back().Action != ResponseAction::Ignore) {
|
||||
m_Queues.FutureResponseActions.emplace(FutureResponseAction{1, ResponseAction::Ignore});
|
||||
} else {
|
||||
++m_Queues.FutureResponseActions.back().Amount;
|
||||
}
|
||||
|
||||
m_QueuedReads.Set();
|
||||
return false;
|
||||
}
|
||||
|
||||
if (next.FireAndForgetQueries) {
|
||||
auto& item (*next.FireAndForgetQueries);
|
||||
size_t i = 0;
|
||||
|
||||
DecreasePendingQueries(item.size());
|
||||
|
||||
try {
|
||||
for (auto& query : item) {
|
||||
WriteOne(query, yc);
|
||||
++i;
|
||||
}
|
||||
} catch (const std::exception& ex) {
|
||||
Log msg (LogCritical, "IcingaDB", "Error during sending query");
|
||||
LogQuery(item[i], msg);
|
||||
msg << " which has been fired and forgotten: " << ex.what();
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
if (m_Queues.FutureResponseActions.empty() || m_Queues.FutureResponseActions.back().Action != ResponseAction::Ignore) {
|
||||
m_Queues.FutureResponseActions.emplace(FutureResponseAction{item.size(), ResponseAction::Ignore});
|
||||
} else {
|
||||
m_Queues.FutureResponseActions.back().Amount += item.size();
|
||||
}
|
||||
|
||||
m_QueuedReads.Set();
|
||||
if (m_Queues.FutureResponseActions.empty() || m_Queues.FutureResponseActions.back().Action != ResponseAction::Ignore) {
|
||||
m_Queues.FutureResponseActions.emplace(FutureResponseAction{1, ResponseAction::Ignore});
|
||||
} else {
|
||||
++m_Queues.FutureResponseActions.back().Amount;
|
||||
}
|
||||
|
||||
if (next.GetResultOfQuery) {
|
||||
auto& item (*next.GetResultOfQuery);
|
||||
DecreasePendingQueries(1);
|
||||
m_QueuedReads.Set();
|
||||
return true;
|
||||
}
|
||||
|
||||
try {
|
||||
WriteOne(item.first, yc);
|
||||
} catch (const std::exception&) {
|
||||
item.second.set_exception(std::current_exception());
|
||||
/**
|
||||
* Write multiple Redis queries in a fire-and-forget manner.
|
||||
*
|
||||
* @param item Redis queries
|
||||
*
|
||||
* @return true on success, false on failure.
|
||||
*/
|
||||
bool RedisConnection::WriteItem(const FireAndForgetQs& item, boost::asio::yield_context& yc)
|
||||
{
|
||||
size_t i = 0;
|
||||
|
||||
return;
|
||||
DecreasePendingQueries(item->size());
|
||||
|
||||
try {
|
||||
for (auto& query : *item) {
|
||||
WriteOne(query, yc);
|
||||
++i;
|
||||
}
|
||||
} catch (const std::exception& ex) {
|
||||
Log msg (LogCritical, "IcingaDB", "Error during sending query");
|
||||
LogQuery((*item)[i], msg);
|
||||
msg << " which has been fired and forgotten: " << ex.what();
|
||||
|
||||
m_Queues.ReplyPromises.emplace(std::move(item.second));
|
||||
|
||||
if (m_Queues.FutureResponseActions.empty() || m_Queues.FutureResponseActions.back().Action != ResponseAction::Deliver) {
|
||||
m_Queues.FutureResponseActions.emplace(FutureResponseAction{1, ResponseAction::Deliver});
|
||||
} else {
|
||||
++m_Queues.FutureResponseActions.back().Amount;
|
||||
}
|
||||
|
||||
m_QueuedReads.Set();
|
||||
return false;
|
||||
}
|
||||
|
||||
if (next.GetResultsOfQueries) {
|
||||
auto& item (*next.GetResultsOfQueries);
|
||||
DecreasePendingQueries(item.first.size());
|
||||
if (m_Queues.FutureResponseActions.empty() || m_Queues.FutureResponseActions.back().Action != ResponseAction::Ignore) {
|
||||
m_Queues.FutureResponseActions.emplace(FutureResponseAction{item->size(), ResponseAction::Ignore});
|
||||
} else {
|
||||
m_Queues.FutureResponseActions.back().Amount += item->size();
|
||||
}
|
||||
|
||||
try {
|
||||
for (auto& query : item.first) {
|
||||
WriteOne(query, yc);
|
||||
}
|
||||
} catch (const std::exception&) {
|
||||
item.second.set_exception(std::current_exception());
|
||||
m_QueuedReads.Set();
|
||||
return true;
|
||||
}
|
||||
|
||||
return;
|
||||
/**
|
||||
* Write a single Redis query and enqueue a response promise to be fulfilled once the response has been received.
|
||||
*
|
||||
* @param item Redis query and promise for the response
|
||||
*/
|
||||
bool RedisConnection::WriteItem(const QueryWithPromise& item, boost::asio::yield_context& yc)
|
||||
{
|
||||
DecreasePendingQueries(1);
|
||||
|
||||
try {
|
||||
WriteOne(item->first, yc);
|
||||
} catch (const std::exception&) {
|
||||
item->second.set_exception(std::current_exception());
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
m_Queues.ReplyPromises.push(std::move(item->second));
|
||||
|
||||
if (m_Queues.FutureResponseActions.empty() || m_Queues.FutureResponseActions.back().Action != ResponseAction::Deliver) {
|
||||
m_Queues.FutureResponseActions.emplace(FutureResponseAction{1, ResponseAction::Deliver});
|
||||
} else {
|
||||
++m_Queues.FutureResponseActions.back().Amount;
|
||||
}
|
||||
|
||||
m_QueuedReads.Set();
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Write multiple Redis queries and enqueue a response promise to be fulfilled once all responses have been received.
|
||||
*
|
||||
* @param item Redis queries and promise for the responses.
|
||||
*
|
||||
* @return true on success, false on failure.
|
||||
*/
|
||||
bool RedisConnection::WriteItem(const QueriesWithPromise& item, boost::asio::yield_context& yc)
|
||||
{
|
||||
DecreasePendingQueries(item->first.size());
|
||||
|
||||
try {
|
||||
for (auto& query : item->first) {
|
||||
WriteOne(query, yc);
|
||||
}
|
||||
} catch (const std::exception&) {
|
||||
item->second.set_exception(std::current_exception());
|
||||
|
||||
m_Queues.RepliesPromises.emplace(std::move(item.second));
|
||||
m_Queues.FutureResponseActions.emplace(FutureResponseAction{item.first.size(), ResponseAction::DeliverBulk});
|
||||
|
||||
m_QueuedReads.Set();
|
||||
return false;
|
||||
}
|
||||
|
||||
if (next.Callback) {
|
||||
next.Callback(yc);
|
||||
}
|
||||
m_Queues.RepliesPromises.emplace(std::move(item->second));
|
||||
m_Queues.FutureResponseActions.emplace(FutureResponseAction{item->first.size(), ResponseAction::DeliverBulk});
|
||||
|
||||
RecordAffected(next.Affects, Utility::GetTime());
|
||||
m_QueuedReads.Set();
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Invokes the provided callback immediately.
|
||||
*
|
||||
* @param item Callback to execute
|
||||
*/
|
||||
bool RedisConnection::WriteItem(const QueryCallback& item, boost::asio::yield_context& yc)
|
||||
{
|
||||
item(yc);
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -38,12 +38,12 @@
|
|||
#include <map>
|
||||
#include <memory>
|
||||
#include <queue>
|
||||
#include <set>
|
||||
#include <stdexcept>
|
||||
#include <string_view>
|
||||
#include <utility>
|
||||
#include <variant>
|
||||
#include <vector>
|
||||
#include <variant>
|
||||
|
||||
namespace icinga
|
||||
{
|
||||
|
|
@ -142,22 +142,6 @@ struct RedisConnInfo final : SharedObject
|
|||
typedef Value Reply;
|
||||
typedef std::vector<Reply> Replies;
|
||||
|
||||
/**
|
||||
* Redis query priorities, highest first.
|
||||
*
|
||||
* @ingroup icingadb
|
||||
*/
|
||||
enum class QueryPriority : unsigned char
|
||||
{
|
||||
Heartbeat,
|
||||
RuntimeStateStream, // runtime state updates, doesn't affect initially synced states
|
||||
Config, // includes initially synced states
|
||||
RuntimeStateSync, // updates initially synced states at runtime, in parallel to config dump, therefore must be < Config
|
||||
History,
|
||||
CheckResult,
|
||||
SyncConnection = 255
|
||||
};
|
||||
|
||||
struct QueryAffects
|
||||
{
|
||||
size_t Config;
|
||||
|
|
@ -178,24 +162,21 @@ struct RedisConnInfo final : SharedObject
|
|||
return m_Connected.load();
|
||||
}
|
||||
|
||||
void FireAndForgetQuery(Query query, QueryPriority priority, QueryAffects affects = {});
|
||||
void FireAndForgetQueries(Queries queries, QueryPriority priority, QueryAffects affects = {});
|
||||
void FireAndForgetQuery(Query query, QueryAffects affects = {}, bool highPriority = false);
|
||||
void FireAndForgetQueries(Queries queries, QueryAffects affects = {});
|
||||
|
||||
Reply GetResultOfQuery(Query query, QueryPriority priority, QueryAffects affects = {});
|
||||
Replies GetResultsOfQueries(Queries queries, QueryPriority priority, QueryAffects affects = {});
|
||||
Reply GetResultOfQuery(Query query, QueryAffects affects = {});
|
||||
Replies GetResultsOfQueries(Queries queries, QueryAffects affects = {}, bool highPriority = false);
|
||||
|
||||
void EnqueueCallback(const std::function<void(boost::asio::yield_context&)>& callback, QueryPriority priority);
|
||||
void EnqueueCallback(const std::function<void(boost::asio::yield_context&)>& callback);
|
||||
void Sync();
|
||||
double GetOldestPendingQueryTs();
|
||||
|
||||
void SuppressQueryKind(QueryPriority kind);
|
||||
void UnsuppressQueryKind(QueryPriority kind);
|
||||
double GetOldestPendingQueryTs() const;
|
||||
|
||||
void SetConnectedCallback(std::function<void(boost::asio::yield_context& yc)> callback);
|
||||
|
||||
int GetQueryCount(RingBuffer::SizeType span);
|
||||
|
||||
inline int GetPendingQueryCount() const
|
||||
inline std::size_t GetPendingQueryCount() const
|
||||
{
|
||||
return m_PendingQueries;
|
||||
}
|
||||
|
|
@ -239,20 +220,21 @@ struct RedisConnInfo final : SharedObject
|
|||
ResponseAction Action;
|
||||
};
|
||||
|
||||
using FireAndForgetQ = Shared<Query>::Ptr; // A single query that does not expect a result.
|
||||
using FireAndForgetQs = Shared<Queries>::Ptr; // Multiple queries that do not expect results.
|
||||
using QueryWithPromise = Shared<std::pair<Query, std::promise<Reply>>>::Ptr; // A single query expecting a result.
|
||||
using QueriesWithPromise = Shared<std::pair<Queries, std::promise<Replies>>>::Ptr; // Multiple queries expecting results.
|
||||
using QueryCallback = std::function<void(boost::asio::yield_context&)>; // A callback to be executed.
|
||||
|
||||
/**
|
||||
* Something to be send to Redis.
|
||||
* An item in the write queue to be sent to Redis.
|
||||
*
|
||||
* @ingroup icingadb
|
||||
*/
|
||||
struct WriteQueueItem
|
||||
{
|
||||
Shared<Query>::Ptr FireAndForgetQuery;
|
||||
Shared<Queries>::Ptr FireAndForgetQueries;
|
||||
Shared<std::pair<Query, std::promise<Reply>>>::Ptr GetResultOfQuery;
|
||||
Shared<std::pair<Queries, std::promise<Replies>>>::Ptr GetResultsOfQueries;
|
||||
std::function<void(boost::asio::yield_context&)> Callback;
|
||||
|
||||
double CTime;
|
||||
std::variant<FireAndForgetQ, FireAndForgetQs, QueryWithPromise, QueriesWithPromise, QueryCallback> Item;
|
||||
double CTime; // When was this item queued?
|
||||
QueryAffects Affects;
|
||||
};
|
||||
|
||||
|
|
@ -281,7 +263,11 @@ struct RedisConnInfo final : SharedObject
|
|||
void ReadLoop(boost::asio::yield_context& yc);
|
||||
void WriteLoop(boost::asio::yield_context& yc);
|
||||
void LogStats(boost::asio::yield_context& yc);
|
||||
void WriteItem(boost::asio::yield_context& yc, WriteQueueItem item);
|
||||
bool WriteItem(const FireAndForgetQ& item, boost::asio::yield_context& yc);
|
||||
bool WriteItem(const FireAndForgetQs& item, boost::asio::yield_context& yc);
|
||||
bool WriteItem(const QueryWithPromise& item, boost::asio::yield_context& yc);
|
||||
bool WriteItem(const QueriesWithPromise& item, boost::asio::yield_context& yc);
|
||||
bool WriteItem(const QueryCallback& item, boost::asio::yield_context& yc);
|
||||
Reply ReadOne(boost::asio::yield_context& yc);
|
||||
void WriteOne(Query& query, boost::asio::yield_context& yc);
|
||||
|
||||
|
|
@ -310,18 +296,41 @@ struct RedisConnInfo final : SharedObject
|
|||
Atomic<bool> m_Connecting, m_Connected, m_Started;
|
||||
|
||||
struct {
|
||||
// Items to be send to Redis
|
||||
std::map<QueryPriority, std::queue<WriteQueueItem>> Writes;
|
||||
std::queue<WriteQueueItem> HighWriteQ; // High priority writes to be sent to Redis.
|
||||
std::queue<WriteQueueItem> NormalWriteQ; // Normal priority writes to be sent to Redis.
|
||||
// Requestors, each waiting for a single response
|
||||
std::queue<std::promise<Reply>> ReplyPromises;
|
||||
// Requestors, each waiting for multiple responses at once
|
||||
std::queue<std::promise<Replies>> RepliesPromises;
|
||||
// Metadata about all of the above
|
||||
std::queue<FutureResponseAction> FutureResponseActions;
|
||||
} m_Queues;
|
||||
|
||||
// Kinds of queries not to actually send yet
|
||||
std::set<QueryPriority> m_SuppressedQueryKinds;
|
||||
WriteQueueItem PopFront()
|
||||
{
|
||||
if (!HighWriteQ.empty()) {
|
||||
WriteQueueItem item(std::move(HighWriteQ.front()));
|
||||
HighWriteQ.pop();
|
||||
return item;
|
||||
}
|
||||
WriteQueueItem item(std::move(NormalWriteQ.front()));
|
||||
NormalWriteQ.pop();
|
||||
return item;
|
||||
}
|
||||
|
||||
void Push(WriteQueueItem&& item, bool highPriority)
|
||||
{
|
||||
if (highPriority) {
|
||||
HighWriteQ.push(std::move(item));
|
||||
} else {
|
||||
NormalWriteQ.push(std::move(item));
|
||||
}
|
||||
}
|
||||
|
||||
bool HasWrites() const
|
||||
{
|
||||
return !HighWriteQ.empty() || !NormalWriteQ.empty();
|
||||
}
|
||||
} m_Queues;
|
||||
|
||||
// Indicate that there's something to send/receive
|
||||
AsioEvent m_QueuedWrites;
|
||||
|
|
|
|||
Loading…
Reference in a new issue