// ceph-main/src/crimson/osd/ops_executer.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <memory>
#include <type_traits>
#include <utility>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <fmt/os.h>
#include <seastar/core/chunked_fifo.hh>
#include <seastar/core/future.hh>
#include <seastar/core/shared_future.hh>
#include <seastar/core/shared_ptr.hh>
#include "common/dout.h"
#include "common/map_cacher.hpp"
#include "common/static_ptr.h"
#include "messages/MOSDOp.h"
#include "os/Transaction.h"
#include "osd/osd_types.h"
#include "crimson/common/errorator.h"
#include "crimson/common/interruptible_future.h"
#include "crimson/common/type_helpers.h"
#include "crimson/osd/osd_operations/client_request.h"
#include "crimson/osd/osd_operations/peering_event.h"
#include "crimson/osd/pg_backend.h"
#include "crimson/osd/pg_interval_interrupt_condition.h"
#include "crimson/osd/shard_services.h"
struct ObjectState;
struct OSDOp;
class OSDriver;
class SnapMapper;
namespace crimson::osd {
class PG;
// OpsExecuter -- a class for executing ops targeting a certain object.
class OpsExecuter : public seastar::enable_lw_shared_from_this<OpsExecuter> {
friend class SnapTrimObjSubEvent;
using call_errorator = crimson::errorator<
crimson::stateful_ec,
crimson::ct_error::enoent,
crimson::ct_error::eexist,
crimson::ct_error::enospc,
crimson::ct_error::edquot,
crimson::ct_error::cmp_fail,
crimson::ct_error::eagain,
crimson::ct_error::invarg,
crimson::ct_error::erange,
crimson::ct_error::ecanceled,
crimson::ct_error::enametoolong,
crimson::ct_error::permission_denied,
crimson::ct_error::operation_not_supported,
crimson::ct_error::input_output_error,
crimson::ct_error::value_too_large,
crimson::ct_error::file_too_large>;
using read_errorator = PGBackend::read_errorator;
using write_ertr = PGBackend::write_ertr;
using get_attr_errorator = PGBackend::get_attr_errorator;
using watch_errorator = crimson::errorator<
crimson::ct_error::enoent,
crimson::ct_error::invarg,
crimson::ct_error::not_connected,
crimson::ct_error::timed_out>;
using call_ierrorator =
::crimson::interruptible::interruptible_errorator<
IOInterruptCondition, call_errorator>;
using read_ierrorator =
::crimson::interruptible::interruptible_errorator<
IOInterruptCondition, read_errorator>;
using write_iertr =
::crimson::interruptible::interruptible_errorator<
IOInterruptCondition, write_ertr>;
using get_attr_ierrorator =
::crimson::interruptible::interruptible_errorator<
IOInterruptCondition, get_attr_errorator>;
using watch_ierrorator =
::crimson::interruptible::interruptible_errorator<
IOInterruptCondition, watch_errorator>;
template <typename Errorator, typename T = void>
using interruptible_errorated_future =
::crimson::interruptible::interruptible_errorated_future<
IOInterruptCondition, Errorator, T>;
using interruptor =
::crimson::interruptible::interruptor<IOInterruptCondition>;
template <typename T = void>
using interruptible_future =
::crimson::interruptible::interruptible_future<
IOInterruptCondition, T>;
public:
  // ExecutableMessage -- an interface class to allow using OpsExecuter
  // with message types other than just `MOSDOp`. The type erasure
  // happens in the ctor of `OpsExecuter`.
struct ExecutableMessage {
virtual osd_reqid_t get_reqid() const = 0;
virtual utime_t get_mtime() const = 0;
virtual epoch_t get_map_epoch() const = 0;
virtual entity_inst_t get_orig_source_inst() const = 0;
virtual uint64_t get_features() const = 0;
virtual bool has_flag(uint32_t flag) const = 0;
virtual entity_name_t get_source() const = 0;
};
template <class ImplT>
class ExecutableMessagePimpl final : ExecutableMessage {
const ImplT* pimpl;
// In crimson, conn is independently maintained outside Message.
const crimson::net::ConnectionRef conn;
public:
ExecutableMessagePimpl(const ImplT* pimpl,
const crimson::net::ConnectionRef conn)
: pimpl(pimpl), conn(conn) {
}
osd_reqid_t get_reqid() const final {
return pimpl->get_reqid();
}
bool has_flag(uint32_t flag) const final {
return pimpl->has_flag(flag);
}
utime_t get_mtime() const final {
return pimpl->get_mtime();
    }
epoch_t get_map_epoch() const final {
return pimpl->get_map_epoch();
}
entity_inst_t get_orig_source_inst() const final {
      // We can't get the original source address from the message
      // since (in crimson) the connection is maintained
      // outside of the Message.
return entity_inst_t(get_source(), conn->get_peer_addr());
}
entity_name_t get_source() const final {
return pimpl->get_source();
}
uint64_t get_features() const final {
return pimpl->get_features();
}
};
  // Because OpsExecuter is a pretty heavy-weight object, we want to ensure
  // it's neither copied nor moved by accident. Performance is the sole
  // reason for prohibiting that.
OpsExecuter(OpsExecuter&&) = delete;
OpsExecuter(const OpsExecuter&) = delete;
using osd_op_errorator = crimson::compound_errorator_t<
call_errorator,
read_errorator,
write_ertr,
get_attr_errorator,
watch_errorator,
PGBackend::stat_errorator>;
using osd_op_ierrorator =
::crimson::interruptible::interruptible_errorator<
IOInterruptCondition, osd_op_errorator>;
object_stat_sum_t delta_stats;
private:
  // An operation can be divided into two stages: the main one and the
  // effect-exposing one. The former is performed immediately on the call
  // to `do_osd_op()` while the latter on `submit_changes()` -- after
  // successfully processing the main stages of all involved operations.
  // When any main stage fails, none of the scheduled effect-exposing
  // stages will be executed.
  // When an operation requires this division, some variant of
  // `with_effect()` should be used.
struct effect_t {
    // an effect can affect the PG, e.g. create a watch timeout
virtual osd_op_errorator::future<> execute(Ref<PG> pg) = 0;
virtual ~effect_t() = default;
};
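  // A minimal usage sketch (illustrative only, not part of the original
  // header). The context type and the surrounding variables are assumptions;
  // the one real constraint, enforced by the static_assert in
  // with_effect_on_obc(), is that the effect functor must be captureless so
  // it can outlive the main stage:
  //
  //   struct notify_ctx_t { uint64_t cookie; };   // hypothetical context
  //   return with_effect_on_obc(
  //     notify_ctx_t{cookie},
  //     [&] (auto& ctx) {
  //       // main stage -- runs immediately; would normally return the
  //       // errorated future of the sub-operation being handled
  //       return seastar::now();
  //     },
  //     [] (notify_ctx_t&& ctx, ObjectContextRef obc, Ref<PG> pg) {
  //       // effect stage -- deferred until flush_changes_n_do_ops_effects()
  //       return seastar::now();
  //     });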
Ref<PG> pg; // for the sake of object class
ObjectContextRef obc;
const OpInfo& op_info;
using abstracted_msg_t =
ceph::static_ptr<ExecutableMessage,
sizeof(ExecutableMessagePimpl<void>)>;
abstracted_msg_t msg;
crimson::net::ConnectionRef conn;
std::optional<osd_op_params_t> osd_op_params;
bool user_modify = false;
ceph::os::Transaction txn;
size_t num_read = 0; ///< count read ops
size_t num_write = 0; ///< count update ops
SnapContext snapc; // writer snap context
struct CloningContext {
SnapSet new_snapset;
pg_log_entry_t log_entry;
void apply_to(
std::vector<pg_log_entry_t>& log_entries,
ObjectContext& processed_obc) &&;
};
std::unique_ptr<CloningContext> cloning_ctx;
/**
* execute_clone
*
   * If snapc contains a snap which occurred logically after the last write
   * seen by this object (see OpsExecuter::should_clone()), we first need to
   * make a clone of the object at its current state. execute_clone primes
   * txn with that clone operation and returns an
   * OpsExecuter::CloningContext which will allow us to fill in the
   * corresponding metadata and log_entries once the operations have been
   * processed.
*
* Note that this strategy differs from classic, which instead performs this
* work at the end and reorders the transaction. See
* PrimaryLogPG::make_writeable
*
* @param snapc [in] snapc for this operation (from the client if from the
* client, from the pool otherwise)
* @param initial_obs [in] objectstate for the object at operation start
* @param initial_snapset [in] snapset for the object at operation start
* @param backend [in,out] interface for generating mutations
* @param txn [out] transaction for the operation
*/
std::unique_ptr<CloningContext> execute_clone(
const SnapContext& snapc,
const ObjectState& initial_obs,
const SnapSet& initial_snapset,
PGBackend& backend,
ceph::os::Transaction& txn);
/**
* should_clone
*
   * Predicate returning whether a user write with snap context snapc
   * contains a snap which occurred logically after the most recent write
   * on the object reflected in initial_obc, and thus requires a clone.
*
* @param initial_obc [in] obc for object to be mutated
* @param snapc [in] snapc for this operation (from the client if from the
* client, from the pool otherwise)
*/
static bool should_clone(
const ObjectContext& initial_obc,
const SnapContext& snapc) {
// clone?
return initial_obc.obs.exists // both nominally and...
&& !initial_obc.obs.oi.is_whiteout() // ... logically exists
&& snapc.snaps.size() // there are snaps
&& snapc.snaps[0] > initial_obc.ssc->snapset.seq; // existing obj is old
}
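  // Worked example (illustrative): suppose the head object was last written
  // when snapset.seq was 5, and a client write arrives with
  // snapc = {seq: 7, snaps: [7, 5, 3]}. The object exists, is not a
  // whiteout, snaps is non-empty and snaps[0] == 7 > 5, so should_clone()
  // returns true and execute_clone() will preserve the pre-write state as a
  // clone before the new write is applied.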
interruptible_future<std::vector<pg_log_entry_t>> flush_clone_metadata(
std::vector<pg_log_entry_t>&& log_entries,
SnapMapper& snap_mapper,
OSDriver& osdriver,
ceph::os::Transaction& txn);
static interruptible_future<> snap_map_remove(
const hobject_t& soid,
SnapMapper& snap_mapper,
OSDriver& osdriver,
ceph::os::Transaction& txn);
static interruptible_future<> snap_map_modify(
const hobject_t& soid,
const std::set<snapid_t>& snaps,
SnapMapper& snap_mapper,
OSDriver& osdriver,
ceph::os::Transaction& txn);
static interruptible_future<> snap_map_clone(
const hobject_t& soid,
const std::set<snapid_t>& snaps,
SnapMapper& snap_mapper,
OSDriver& osdriver,
ceph::os::Transaction& txn);
  // This gizmo could be wrapped in std::optional for the sake of lazy
  // initialization. We don't need it for ops that don't have effects.
// TODO: verify the init overhead of chunked_fifo
seastar::chunked_fifo<std::unique_ptr<effect_t>> op_effects;
template <class Context, class MainFunc, class EffectFunc>
auto with_effect_on_obc(
Context&& ctx,
MainFunc&& main_func,
EffectFunc&& effect_func);
call_ierrorator::future<> do_op_call(OSDOp& osd_op);
watch_ierrorator::future<> do_op_watch(
OSDOp& osd_op,
ObjectState& os,
ceph::os::Transaction& txn);
watch_ierrorator::future<> do_op_watch_subop_watch(
OSDOp& osd_op,
ObjectState& os,
ceph::os::Transaction& txn);
watch_ierrorator::future<> do_op_watch_subop_reconnect(
OSDOp& osd_op,
ObjectState& os,
ceph::os::Transaction& txn);
watch_ierrorator::future<> do_op_watch_subop_unwatch(
OSDOp& osd_op,
ObjectState& os,
ceph::os::Transaction& txn);
watch_ierrorator::future<> do_op_watch_subop_ping(
OSDOp& osd_op,
ObjectState& os,
ceph::os::Transaction& txn);
watch_ierrorator::future<> do_op_list_watchers(
OSDOp& osd_op,
const ObjectState& os);
watch_ierrorator::future<> do_op_notify(
OSDOp& osd_op,
const ObjectState& os);
watch_ierrorator::future<> do_op_notify_ack(
OSDOp& osd_op,
const ObjectState& os);
call_errorator::future<> do_assert_ver(
OSDOp& osd_op,
const ObjectState& os);
using list_snaps_ertr = read_errorator::extend<
crimson::ct_error::invarg>;
using list_snaps_iertr = ::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
list_snaps_ertr>;
list_snaps_iertr::future<> do_list_snaps(
OSDOp& osd_op,
const ObjectState& os,
const SnapSet& ss);
template <class Func>
auto do_const_op(Func&& f);
template <class Func>
auto do_read_op(Func&& f) {
++num_read;
// TODO: pass backend as read-only
return do_const_op(std::forward<Func>(f));
}
template <class Func>
auto do_snapset_op(Func&& f) {
++num_read;
return std::invoke(
std::forward<Func>(f),
std::as_const(obc->obs),
std::as_const(obc->ssc->snapset));
}
enum class modified_by {
user,
sys,
};
template <class Func>
auto do_write_op(Func&& f, modified_by m = modified_by::user);
decltype(auto) dont_do_legacy_op() {
return crimson::ct_error::operation_not_supported::make();
}
interruptible_errorated_future<osd_op_errorator>
do_execute_op(OSDOp& osd_op);
OpsExecuter(Ref<PG> pg,
ObjectContextRef obc,
const OpInfo& op_info,
abstracted_msg_t&& msg,
crimson::net::ConnectionRef conn,
const SnapContext& snapc);
public:
template <class MsgT>
OpsExecuter(Ref<PG> pg,
ObjectContextRef obc,
const OpInfo& op_info,
const MsgT& msg,
crimson::net::ConnectionRef conn,
const SnapContext& snapc)
: OpsExecuter(
std::move(pg),
std::move(obc),
op_info,
abstracted_msg_t{
std::in_place_type_t<ExecutableMessagePimpl<MsgT>>{},
&msg,
conn},
conn,
snapc) {
}
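  // Illustrative sketch (not part of the original header): the templated
  // constructor above performs the type erasure described for
  // ExecutableMessage, so any message type providing the accessors used by
  // ExecutableMessagePimpl can be passed directly. Assuming in-scope pg,
  // obc, op_info, conn, snapc and an MOSDOp reference `m`, construction
  // could look like:
  //
  //   auto ox = seastar::make_lw_shared<OpsExecuter>(
  //     pg, obc, op_info, *m, conn, snapc);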
template <class Func>
struct RollbackHelper;
template <class Func>
RollbackHelper<Func> create_rollbacker(Func&& func);
interruptible_errorated_future<osd_op_errorator>
execute_op(OSDOp& osd_op);
using rep_op_fut_tuple =
std::tuple<interruptible_future<>, osd_op_ierrorator::future<>>;
using rep_op_fut_t =
interruptible_future<rep_op_fut_tuple>;
template <typename MutFunc>
rep_op_fut_t flush_changes_n_do_ops_effects(
const std::vector<OSDOp>& ops,
SnapMapper& snap_mapper,
OSDriver& osdriver,
MutFunc&& mut_func) &&;
std::vector<pg_log_entry_t> prepare_transaction(
const std::vector<OSDOp>& ops);
void fill_op_params_bump_pg_version();
ObjectContextRef get_obc() const {
return obc;
}
const object_info_t &get_object_info() const {
return obc->obs.oi;
}
const hobject_t &get_target() const {
return get_object_info().soid;
}
const auto& get_message() const {
return *msg;
}
size_t get_processed_rw_ops_num() const {
return num_read + num_write;
}
uint32_t get_pool_stripe_width() const;
bool has_seen_write() const {
return num_write > 0;
}
object_stat_sum_t& get_stats(){
return delta_stats;
}
version_t get_last_user_version() const;
std::pair<object_info_t, ObjectContextRef> prepare_clone(
const hobject_t& coid);
void apply_stats();
};
template <class Context, class MainFunc, class EffectFunc>
auto OpsExecuter::with_effect_on_obc(
Context&& ctx,
MainFunc&& main_func,
EffectFunc&& effect_func)
{
using context_t = std::decay_t<Context>;
  // The language offers implicit conversion to pointer-to-function for a
  // lambda only when it's captureless. We enforce this restriction due to
  // the fact that `flush_changes()` std::moves many of the executer's parts.
using allowed_effect_func_t =
seastar::future<> (*)(context_t&&, ObjectContextRef, Ref<PG>);
static_assert(std::is_convertible_v<EffectFunc, allowed_effect_func_t>,
"with_effect function is not allowed to capture");
struct task_t final : effect_t {
context_t ctx;
EffectFunc effect_func;
ObjectContextRef obc;
task_t(Context&& ctx, EffectFunc&& effect_func, ObjectContextRef obc)
: ctx(std::move(ctx)),
effect_func(std::move(effect_func)),
obc(std::move(obc)) {
}
osd_op_errorator::future<> execute(Ref<PG> pg) final {
return std::move(effect_func)(std::move(ctx),
std::move(obc),
std::move(pg));
}
};
auto task =
std::make_unique<task_t>(std::move(ctx), std::move(effect_func), obc);
auto& ctx_ref = task->ctx;
op_effects.emplace_back(std::move(task));
return std::forward<MainFunc>(main_func)(ctx_ref);
}
template <typename MutFunc>
OpsExecuter::rep_op_fut_t
OpsExecuter::flush_changes_n_do_ops_effects(
const std::vector<OSDOp>& ops,
SnapMapper& snap_mapper,
OSDriver& osdriver,
MutFunc&& mut_func) &&
{
const bool want_mutate = !txn.empty();
// osd_op_params are instantiated by every wr-like operation.
assert(osd_op_params || !want_mutate);
assert(obc);
rep_op_fut_t maybe_mutated =
interruptor::make_ready_future<rep_op_fut_tuple>(
seastar::now(),
interruptor::make_interruptible(osd_op_errorator::now()));
if (cloning_ctx) {
ceph_assert(want_mutate);
}
if (want_mutate) {
if (user_modify) {
osd_op_params->user_at_version = osd_op_params->at_version.version;
}
maybe_mutated = flush_clone_metadata(
prepare_transaction(ops),
snap_mapper,
osdriver,
txn
).then_interruptible([mut_func=std::move(mut_func),
this](auto&& log_entries) mutable {
auto [submitted, all_completed] =
std::forward<MutFunc>(mut_func)(std::move(txn),
std::move(obc),
std::move(*osd_op_params),
std::move(log_entries));
return interruptor::make_ready_future<rep_op_fut_tuple>(
std::move(submitted),
osd_op_ierrorator::future<>(std::move(all_completed)));
});
}
apply_stats();
if (__builtin_expect(op_effects.empty(), true)) {
return maybe_mutated;
} else {
return maybe_mutated.then_unpack_interruptible(
      // need an extra ref to pg due to apply_stats() which can be executed
      // after informing the snap mapper
[this, pg=this->pg](auto&& submitted, auto&& all_completed) mutable {
return interruptor::make_ready_future<rep_op_fut_tuple>(
std::move(submitted),
all_completed.safe_then_interruptible([this, pg=std::move(pg)] {
        // let's do the cleanup of `op_effects` in the destructor
return interruptor::do_for_each(op_effects,
[pg=std::move(pg)](auto& op_effect) {
return op_effect->execute(pg);
});
}));
});
}
}
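// Illustrative sketch (not part of the original header): the MutFunc passed
// to flush_changes_n_do_ops_effects() consumes the transaction, obc, op
// params and log entries, and must yield the <submitted, all_completed>
// pair that ends up in rep_op_fut_tuple. A hypothetical caller-side functor
// (submit_transaction is named only for illustration) could look like:
//
//   [pg=std::move(pg)] (ceph::os::Transaction&& txn,
//                       ObjectContextRef obc,
//                       osd_op_params_t&& params,
//                       std::vector<pg_log_entry_t>&& entries) {
//     return pg->submit_transaction(
//       std::move(obc), std::move(txn), std::move(params), std::move(entries));
//   }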
template <class Func>
struct OpsExecuter::RollbackHelper {
interruptible_future<> rollback_obc_if_modified(const std::error_code& e);
ObjectContextRef get_obc() const {
assert(ox);
return ox->obc;
}
seastar::lw_shared_ptr<OpsExecuter> ox;
Func func;
};
template <class Func>
inline OpsExecuter::RollbackHelper<Func>
OpsExecuter::create_rollbacker(Func&& func) {
return {shared_from_this(), std::forward<Func>(func)};
}
template <class Func>
OpsExecuter::interruptible_future<>
OpsExecuter::RollbackHelper<Func>::rollback_obc_if_modified(
const std::error_code& e)
{
  // Oops, an operation has failed. do_osd_ops() together with
  // OpsExecuter has already dropped the ObjectStore::Transaction if
  // there was any. However, this is not enough to completely roll
  // back, as we gave OpsExecuter the only copy of `obc` we maintain,
  // and we did so for both reading and writing.
  // Now all modifications must be reverted.
  //
  // Let's just reload from the store. Evicting from the shared
  // LRU would be tricky, as the next MOSDOp (the one at the `get_obc`
  // phase) could actually have already finished the lookup. Fortunately,
  // this is supposed to live on cold paths, so performance is not
  // a concern -- simplicity wins.
  //
  // The conditional's purpose is to efficiently handle hot errors
  // which may appear as a result of e.g. CEPH_OSD_OP_CMPXATTR or
  // CEPH_OSD_OP_OMAP_CMP. These are read-like ops and clients
  // typically append them before any write. If OpsExecuter hasn't
  // seen any modifying operation, `obc` is supposed to be kept
  // unchanged.
assert(ox);
const auto need_rollback = ox->has_seen_write();
crimson::get_logger(ceph_subsys_osd).debug(
"{}: object {} got error {}, need_rollback={}",
__func__,
ox->obc->get_oid(),
e,
need_rollback);
return need_rollback ? func(*ox->obc) : interruptor::now();
}
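// Illustrative sketch (not part of the original header): the rollback helper
// is created up front and invoked from an error handler; the obc-reloading
// call below is hypothetical and only shows the expected shape of Func,
// which receives the possibly-dirtied ObjectContext by reference:
//
//   auto rollbacker = ox->create_rollbacker([&pg] (auto& obc) {
//     return pg.reload_obc(obc);   // hypothetical reload entry point
//   });
//   ...
//   return rollbacker.rollback_obc_if_modified(err).then_interruptible(
//     [] { /* reply with the error */ });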
// PgOpsExecuter -- a class for executing ops targeting a certain PG.
class PgOpsExecuter {
template <typename T = void>
using interruptible_future =
::crimson::interruptible::interruptible_future<
IOInterruptCondition, T>;
public:
PgOpsExecuter(const PG& pg, const MOSDOp& msg)
: pg(pg), nspace(msg.get_hobj().nspace) {
}
interruptible_future<> execute_op(OSDOp& osd_op);
private:
const PG& pg;
const std::string& nspace;
};
} // namespace crimson::osd
// ceph-main/src/crimson/osd/osd.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/abort_source.hh>
#include <seastar/core/future.hh>
#include <seastar/core/shared_future.hh>
#include <seastar/core/gate.hh>
#include <seastar/core/shared_ptr.hh>
#include <seastar/core/shared_future.hh>
#include <seastar/core/timer.hh>
#include "crimson/common/logclient.h"
#include "crimson/common/type_helpers.h"
#include "crimson/common/auth_handler.h"
#include "crimson/common/gated.h"
#include "crimson/admin/admin_socket.h"
#include "crimson/common/simple_lru.h"
#include "crimson/mgr/client.h"
#include "crimson/net/Dispatcher.h"
#include "crimson/osd/osdmap_service.h"
#include "crimson/osd/pg_shard_manager.h"
#include "crimson/osd/osdmap_gate.h"
#include "crimson/osd/pg_map.h"
#include "crimson/osd/osd_operations/peering_event.h"
#include "crimson/osd/state.h"
#include "messages/MOSDOp.h"
#include "osd/PeeringState.h"
#include "osd/osd_types.h"
#include "osd/osd_perf_counters.h"
#include "osd/PGPeeringEvent.h"
class MCommand;
class MOSDMap;
class MOSDRepOpReply;
class MOSDRepOp;
class MOSDScrub2;
class OSDMeta;
class Heartbeat;
namespace ceph::os {
class Transaction;
}
namespace crimson::mon {
class Client;
}
namespace crimson::net {
class Messenger;
}
namespace crimson::os {
class FuturizedStore;
}
namespace crimson::osd {
class PG;
class OSD final : public crimson::net::Dispatcher,
private crimson::common::AuthHandler,
private crimson::mgr::WithStats {
public:
class ShardDispatcher
: public seastar::peering_sharded_service<ShardDispatcher> {
friend class OSD;
public:
ShardDispatcher(
OSD& osd,
PGShardMapping& pg_to_shard_mapping)
: pg_shard_manager(osd.osd_singleton_state,
osd.shard_services, pg_to_shard_mapping),
osd(osd) {}
~ShardDispatcher() = default;
// Dispatcher methods
seastar::future<> ms_dispatch(crimson::net::ConnectionRef, MessageRef);
private:
bool require_mon_peer(crimson::net::Connection *conn, Ref<Message> m);
seastar::future<> handle_osd_map(Ref<MOSDMap> m);
seastar::future<> _handle_osd_map(Ref<MOSDMap> m);
seastar::future<> handle_pg_create(crimson::net::ConnectionRef conn,
Ref<MOSDPGCreate2> m);
seastar::future<> handle_osd_op(crimson::net::ConnectionRef conn,
Ref<MOSDOp> m);
seastar::future<> handle_rep_op(crimson::net::ConnectionRef conn,
Ref<MOSDRepOp> m);
seastar::future<> handle_rep_op_reply(crimson::net::ConnectionRef conn,
Ref<MOSDRepOpReply> m);
seastar::future<> handle_peering_op(crimson::net::ConnectionRef conn,
Ref<MOSDPeeringOp> m);
seastar::future<> handle_recovery_subreq(crimson::net::ConnectionRef conn,
Ref<MOSDFastDispatchOp> m);
seastar::future<> handle_scrub(crimson::net::ConnectionRef conn,
Ref<MOSDScrub2> m);
seastar::future<> handle_mark_me_down(crimson::net::ConnectionRef conn,
Ref<MOSDMarkMeDown> m);
seastar::future<> committed_osd_maps(version_t first,
version_t last,
Ref<MOSDMap> m);
seastar::future<> check_osdmap_features();
seastar::future<> handle_command(crimson::net::ConnectionRef conn,
Ref<MCommand> m);
seastar::future<> handle_update_log_missing(crimson::net::ConnectionRef conn,
Ref<MOSDPGUpdateLogMissing> m);
seastar::future<> handle_update_log_missing_reply(
crimson::net::ConnectionRef conn,
Ref<MOSDPGUpdateLogMissingReply> m);
public:
void print(std::ostream&) const;
auto &get_pg_shard_manager() {
return pg_shard_manager;
}
auto &get_pg_shard_manager() const {
return pg_shard_manager;
}
ShardServices &get_shard_services() {
return pg_shard_manager.get_shard_services();
}
private:
crimson::osd::PGShardManager pg_shard_manager;
OSD& osd;
};
const int whoami;
const uint32_t nonce;
seastar::abort_source& abort_source;
seastar::timer<seastar::lowres_clock> beacon_timer;
// talk with osd
crimson::net::MessengerRef cluster_msgr;
// talk with client/mon/mgr
crimson::net::MessengerRef public_msgr;
// HB Messengers
crimson::net::MessengerRef hb_front_msgr;
crimson::net::MessengerRef hb_back_msgr;
std::unique_ptr<crimson::mon::Client> monc;
std::unique_ptr<crimson::mgr::Client> mgrc;
// TODO: use a wrapper for ObjectStore
OSDMapService::cached_map_t osdmap;
crimson::os::FuturizedStore& store;
/// _first_ epoch we were marked up (after this process started)
epoch_t boot_epoch = 0;
  /// epoch we last did a bind to new ip:ports
  epoch_t bind_epoch = 0;
  /// since when there are no more pending pg creates from mon
epoch_t last_pg_create_epoch = 0;
ceph::mono_time startup_time;
seastar::shared_mutex handle_osd_map_lock;
OSDSuperblock superblock;
// Dispatcher methods
std::optional<seastar::future<>> ms_dispatch(crimson::net::ConnectionRef, MessageRef) final;
void ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace) final;
void ms_handle_remote_reset(crimson::net::ConnectionRef conn) final;
// mgr::WithStats methods
// pg statistics including osd ones
osd_stat_t osd_stat;
uint32_t osd_stat_seq = 0;
void update_stats();
seastar::future<MessageURef> get_stats() const final;
// AuthHandler methods
void handle_authentication(const EntityName& name,
const AuthCapsInfo& caps) final;
seastar::sharded<PGShardMapping> pg_to_shard_mappings;
seastar::sharded<OSDSingletonState> osd_singleton_state;
seastar::sharded<OSDState> osd_states;
seastar::sharded<ShardServices> shard_services;
seastar::sharded<ShardDispatcher> shard_dispatchers;
std::unique_ptr<Heartbeat> heartbeat;
seastar::timer<seastar::lowres_clock> tick_timer;
// admin-socket
seastar::lw_shared_ptr<crimson::admin::AdminSocket> asok;
public:
OSD(int id, uint32_t nonce,
seastar::abort_source& abort_source,
crimson::os::FuturizedStore& store,
crimson::net::MessengerRef cluster_msgr,
crimson::net::MessengerRef client_msgr,
crimson::net::MessengerRef hb_front_msgr,
crimson::net::MessengerRef hb_back_msgr);
~OSD() final;
seastar::future<> open_meta_coll();
static seastar::future<OSDMeta> open_or_create_meta_coll(
crimson::os::FuturizedStore &store
);
static seastar::future<> mkfs(
crimson::os::FuturizedStore &store,
unsigned whoami,
uuid_d osd_uuid,
uuid_d cluster_fsid,
std::string osdspec_affinity);
seastar::future<> start();
seastar::future<> stop();
void dump_status(Formatter*) const;
void print(std::ostream&) const;
/// @return the seq id of the pg stats being sent
uint64_t send_pg_stats();
auto &get_shard_services() {
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
return shard_services.local();
}
auto &get_pg_shard_manager() {
return shard_dispatchers.local().get_pg_shard_manager();
}
auto &get_pg_shard_manager() const {
return shard_dispatchers.local().get_pg_shard_manager();
}
private:
static seastar::future<> _write_superblock(
crimson::os::FuturizedStore &store,
OSDMeta meta,
OSDSuperblock superblock);
static seastar::future<> _write_key_meta(
crimson::os::FuturizedStore &store
);
seastar::future<> start_boot();
seastar::future<> _preboot(version_t oldest_osdmap, version_t newest_osdmap);
seastar::future<> _send_boot();
seastar::future<> _add_me_to_crush();
seastar::future<> osdmap_subscribe(version_t epoch, bool force_request);
seastar::future<> start_asok_admin();
void write_superblock(ceph::os::Transaction& t);
seastar::future<> read_superblock();
private:
crimson::common::Gated gate;
seastar::promise<> stop_acked;
void got_stop_ack() {
stop_acked.set_value();
}
seastar::future<> prepare_to_stop();
bool should_restart() const;
seastar::future<> restart();
seastar::future<> shutdown();
seastar::future<> update_heartbeat_peers();
friend class PGAdvanceMap;
public:
seastar::future<> send_beacon();
private:
LogClient log_client;
LogChannelRef clog;
};
inline std::ostream& operator<<(std::ostream& out, const OSD& osd) {
osd.print(out);
return out;
}
inline std::ostream& operator<<(std::ostream& out,
const OSD::ShardDispatcher& shard_dispatcher) {
shard_dispatcher.print(out);
return out;
}
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::osd::OSD> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::osd::OSD::ShardDispatcher> : fmt::ostream_formatter {};
#endif
// ceph-main/src/crimson/osd/osd_connection_priv.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/net/Connection.h"
#include "crimson/osd/osd_operation.h"
#include "crimson/osd/osd_operations/client_request.h"
#include "crimson/osd/osd_operations/peering_event.h"
#include "crimson/osd/osd_operations/replicated_request.h"
namespace crimson::osd {
struct OSDConnectionPriv : public crimson::net::Connection::user_private_t {
ConnectionPipeline client_request_conn_pipeline;
ConnectionPipeline peering_request_conn_pipeline;
ConnectionPipeline replicated_request_conn_pipeline;
};
static OSDConnectionPriv &get_osd_priv(crimson::net::Connection *conn) {
if (!conn->has_user_private()) {
conn->set_user_private(std::make_unique<OSDConnectionPriv>());
}
return static_cast<OSDConnectionPriv&>(conn->get_user_private());
}
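// Illustrative sketch (not part of the original header): get_osd_priv()
// lazily attaches the per-connection pipelines on first use, so a dispatcher
// handling e.g. a client request can simply do (with conn being a
// crimson::net::ConnectionRef):
//
//   auto& pipeline = get_osd_priv(&*conn).client_request_conn_pipeline;
//
// and enter its stages in order; later calls on the same connection return
// the same OSDConnectionPriv instance.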
}
// ceph-main/src/crimson/osd/osd_meta.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <map>
#include <string>
#include <seastar/core/future.hh>
#include "osd/osd_types.h"
#include "crimson/os/futurized_collection.h"
#include "crimson/os/futurized_store.h"
namespace ceph::os {
class Transaction;
}
namespace crimson::os {
class FuturizedCollection;
class FuturizedStore;
}
/// metadata shared across PGs or, put another way,
/// metadata not specific to any particular PG.
class OSDMeta {
template<typename T> using Ref = boost::intrusive_ptr<T>;
crimson::os::FuturizedStore::Shard& store;
Ref<crimson::os::FuturizedCollection> coll;
public:
OSDMeta(Ref<crimson::os::FuturizedCollection> coll,
crimson::os::FuturizedStore::Shard& store)
: store{store}, coll{coll}
{}
auto collection() {
return coll;
}
void create(ceph::os::Transaction& t);
void store_map(ceph::os::Transaction& t,
epoch_t e, const bufferlist& m);
seastar::future<bufferlist> load_map(epoch_t e);
void store_superblock(ceph::os::Transaction& t,
const OSDSuperblock& sb);
using load_superblock_ertr = crimson::os::FuturizedStore::Shard::read_errorator;
using load_superblock_ret = load_superblock_ertr::future<OSDSuperblock>;
load_superblock_ret load_superblock();
using ec_profile_t = std::map<std::string, std::string>;
seastar::future<std::tuple<pg_pool_t,
std::string,
ec_profile_t>> load_final_pool_info(int64_t pool);
private:
static ghobject_t osdmap_oid(epoch_t epoch);
static ghobject_t final_pool_info_oid(int64_t pool);
static ghobject_t superblock_oid();
};
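// Illustrative sketch (not part of the original header): storing and loading
// an osdmap epoch through OSDMeta, assuming an in-scope store shard, meta
// collection `coll`, transaction `t` and encoded map `map_bl`:
//
//   OSDMeta meta{coll, store};
//   meta.store_map(t, e, map_bl);              // queue the write into t
//   // ... commit t via the store, then later:
//   return meta.load_map(e).then([] (bufferlist bl) {
//     // decode the OSDMap from bl
//   });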
// ceph-main/src/crimson/osd/osd_operation.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/common/operation.h"
#include "crimson/osd/pg_interval_interrupt_condition.h"
#include "crimson/osd/scheduler/scheduler.h"
#include "osd/osd_types.h"
namespace crimson::os::seastore {
template<class OpT>
class OperationProxyT;
}
namespace crimson::osd {
/// Ordering stages for a class of operations ordered by PG.
struct ConnectionPipeline {
struct AwaitActive : OrderedExclusivePhaseT<AwaitActive> {
static constexpr auto type_name =
"ConnectionPipeline::await_active";
} await_active;
struct AwaitMap : OrderedExclusivePhaseT<AwaitMap> {
static constexpr auto type_name =
"ConnectionPipeline::await_map";
} await_map;
struct GetPG : OrderedExclusivePhaseT<GetPG> {
static constexpr auto type_name =
"ConnectionPipeline::get_pg";
} get_pg;
};
enum class OperationTypeCode {
client_request = 0,
peering_event,
pg_advance_map,
pg_creation,
replicated_request,
background_recovery,
background_recovery_sub,
internal_client_request,
historic_client_request,
logmissing_request,
logmissing_request_reply,
snaptrim_event,
snaptrimobj_subevent,
last_op
};
static constexpr const char* const OP_NAMES[] = {
"client_request",
"peering_event",
"pg_advance_map",
"pg_creation",
"replicated_request",
"background_recovery",
"background_recovery_sub",
"internal_client_request",
"historic_client_request",
"logmissing_request",
"logmissing_request_reply",
"snaptrim_event",
"snaptrimobj_subevent",
};
// prevent the addition of OperationTypeCode-s with no matching OP_NAMES entry:
static_assert(
(sizeof(OP_NAMES)/sizeof(OP_NAMES[0])) ==
static_cast<int>(OperationTypeCode::last_op));
struct InterruptibleOperation : Operation {
template <typename ValuesT = void>
using interruptible_future =
::crimson::interruptible::interruptible_future<
::crimson::osd::IOInterruptCondition, ValuesT>;
using interruptor =
::crimson::interruptible::interruptor<
::crimson::osd::IOInterruptCondition>;
};
template <typename T>
struct OperationT : InterruptibleOperation {
static constexpr const char *type_name = OP_NAMES[static_cast<int>(T::type)];
using IRef = boost::intrusive_ptr<T>;
using ICRef = boost::intrusive_ptr<const T>;
unsigned get_type() const final {
return static_cast<unsigned>(T::type);
}
const char *get_type_name() const final {
return T::type_name;
}
virtual ~OperationT() = default;
private:
virtual void dump_detail(ceph::Formatter *f) const = 0;
};
template <class T>
class TrackableOperationT : public OperationT<T> {
T* that() {
return static_cast<T*>(this);
}
const T* that() const {
return static_cast<const T*>(this);
}
protected:
template<class EventT>
decltype(auto) get_event() {
    // All our derivatives are supposed to define the list of tracking
    // events accessible via `std::get`. This will usually boil down
    // to an instance of `std::tuple`.
return std::get<EventT>(that()->tracking_events);
}
template<class EventT>
decltype(auto) get_event() const {
return std::get<EventT>(that()->tracking_events);
}
using OperationT<T>::OperationT;
struct StartEvent : TimeEvent<StartEvent> {};
struct CompletionEvent : TimeEvent<CompletionEvent> {};
template <class EventT, class... Args>
void track_event(Args&&... args) {
    // The idea is to have a visitor-like interface that allows double
    // dispatch (backend, blocker type).
get_event<EventT>().trigger(*that(), std::forward<Args>(args)...);
}
template <class BlockingEventT, class InterruptorT=void, class F>
auto with_blocking_event(F&& f) {
auto ret = std::forward<F>(f)(typename BlockingEventT::template Trigger<T>{
get_event<BlockingEventT>(), *that()
});
if constexpr (std::is_same_v<InterruptorT, void>) {
return ret;
} else {
using ret_t = decltype(ret);
return typename InterruptorT::template futurize_t<ret_t>{std::move(ret)};
}
}
public:
static constexpr bool is_trackable = true;
};
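// Illustrative sketch (not part of the original header): a concrete operation
// deriving from TrackableOperationT is expected to expose a std::tuple named
// `tracking_events` with one instance per event type it reports; that is what
// get_event()/track_event() reach via std::get. A made-up example:
//
//   class ExampleOp : public TrackableOperationT<ExampleOp> {
//   public:
//     static constexpr OperationTypeCode type =
//       OperationTypeCode::internal_client_request;  // any existing code
//     std::tuple<StartEvent, CompletionEvent> tracking_events;
//     // ... and somewhere in its flow: track_event<StartEvent>();
//   };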
template <class T>
class PhasedOperationT : public TrackableOperationT<T> {
using base_t = TrackableOperationT<T>;
T* that() {
return static_cast<T*>(this);
}
const T* that() const {
return static_cast<const T*>(this);
}
protected:
using TrackableOperationT<T>::TrackableOperationT;
template <class InterruptorT=void, class StageT>
auto enter_stage(StageT& stage) {
return this->template with_blocking_event<typename StageT::BlockingEvent,
InterruptorT>(
[&stage, this] (auto&& trigger) {
        // Storing the pipeline handle is delegated to let children match
        // the lifetime of the pipeline with e.g. ConnectedSocket (important
        // for ConnectionPipeline).
return that()->get_handle().template enter<T>(stage, std::move(trigger));
});
}
template <class OpT>
friend class crimson::os::seastore::OperationProxyT;
  // PGShardManager::start_pg_operation needs access to enter_stage; we can
  // make this more sophisticated later on.
friend class PGShardManager;
};
/**
* Maintains a set of lists of all active ops.
*/
struct OSDOperationRegistry : OperationRegistryT<
static_cast<size_t>(OperationTypeCode::last_op)
> {
OSDOperationRegistry();
void do_stop() override;
void put_historic(const class ClientRequest& op);
size_t dump_historic_client_requests(ceph::Formatter* f) const;
size_t dump_slowest_historic_client_requests(ceph::Formatter* f) const;
private:
op_list::const_iterator last_of_recents;
size_t num_recent_ops = 0;
size_t num_slow_ops = 0;
};
/**
* Throttles set of currently running operations
*
* Very primitive currently, assumes all ops are equally
* expensive and simply limits the number that can be
* concurrently active.
*/
class OperationThrottler : public BlockerT<OperationThrottler>,
private md_config_obs_t {
friend BlockerT<OperationThrottler>;
static constexpr const char* type_name = "OperationThrottler";
template <typename OperationT, typename F>
auto with_throttle(
OperationT* op,
crimson::osd::scheduler::params_t params,
F &&f) {
if (!max_in_progress) return f();
return acquire_throttle(params)
.then(std::forward<F>(f))
.then([this](auto x) {
release_throttle();
return x;
});
}
template <typename OperationT, typename F>
seastar::future<> with_throttle_while(
OperationT* op,
crimson::osd::scheduler::params_t params,
F &&f) {
return with_throttle(op, params, f).then([this, params, op, f](bool cont) {
return cont ? with_throttle_while(op, params, f) : seastar::now();
});
}
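  // Illustrative note (not part of the original header): conceptually,
  // with_throttle_while(op, params, f) keeps re-acquiring the throttle and
  // re-running `f` for as long as `f` reports more work, e.g. with a
  // hypothetical functor:
  //
  //   [&] { return do_one_background_op(); }  // seastar::future<bool>,
  //                                           // true means "run me again"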
public:
OperationThrottler(ConfigProxy &conf);
const char** get_tracked_conf_keys() const final;
void handle_conf_change(const ConfigProxy& conf,
const std::set<std::string> &changed) final;
void update_from_config(const ConfigProxy &conf);
template <class OpT, class... Args>
seastar::future<> with_throttle_while(
BlockingEvent::Trigger<OpT>&& trigger,
Args&&... args) {
return trigger.maybe_record_blocking(
with_throttle_while(std::forward<Args>(args)...), *this);
}
private:
void dump_detail(Formatter *f) const final;
crimson::osd::scheduler::SchedulerRef scheduler;
uint64_t max_in_progress = 0;
uint64_t in_progress = 0;
uint64_t pending = 0;
void wake();
seastar::future<> acquire_throttle(
crimson::osd::scheduler::params_t params);
void release_throttle();
};
}
// ceph-main/src/crimson/osd/osd_operation_external_tracking.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/osd/osd.h"
#include "crimson/osd/osdmap_gate.h"
#include "crimson/osd/osd_operations/background_recovery.h"
#include "crimson/osd/osd_operations/client_request.h"
#include "crimson/osd/osd_operations/peering_event.h"
#include "crimson/osd/osd_operations/pg_advance_map.h"
#include "crimson/osd/osd_operations/recovery_subrequest.h"
#include "crimson/osd/osd_operations/replicated_request.h"
#include "crimson/osd/osd_operations/snaptrim_event.h"
#include "crimson/osd/pg_activation_blocker.h"
#include "crimson/osd/pg_map.h"
namespace crimson::osd {
// Just the boilerplate currently; implementing the actual handlers is left
// for later.
struct LttngBackend
: ClientRequest::StartEvent::Backend,
ConnectionPipeline::AwaitActive::BlockingEvent::Backend,
ConnectionPipeline::AwaitMap::BlockingEvent::Backend,
ConnectionPipeline::GetPG::BlockingEvent::Backend,
OSD_OSDMapGate::OSDMapBlocker::BlockingEvent::Backend,
PGMap::PGCreationBlockingEvent::Backend,
ClientRequest::PGPipeline::AwaitMap::BlockingEvent::Backend,
PG_OSDMapGate::OSDMapBlocker::BlockingEvent::Backend,
ClientRequest::PGPipeline::WaitForActive::BlockingEvent::Backend,
PGActivationBlocker::BlockingEvent::Backend,
ClientRequest::PGPipeline::RecoverMissing::BlockingEvent::Backend,
ClientRequest::PGPipeline::GetOBC::BlockingEvent::Backend,
ClientRequest::PGPipeline::Process::BlockingEvent::Backend,
ClientRequest::PGPipeline::WaitRepop::BlockingEvent::Backend,
ClientRequest::PGPipeline::WaitRepop::BlockingEvent::ExitBarrierEvent::Backend,
ClientRequest::PGPipeline::SendReply::BlockingEvent::Backend,
ClientRequest::CompletionEvent::Backend
{
void handle(ClientRequest::StartEvent&,
const Operation&) override {}
void handle(ConnectionPipeline::AwaitActive::BlockingEvent& ev,
const Operation& op,
const ConnectionPipeline::AwaitActive& blocker) override {
}
void handle(ConnectionPipeline::AwaitMap::BlockingEvent& ev,
const Operation& op,
const ConnectionPipeline::AwaitMap& blocker) override {
}
void handle(OSD_OSDMapGate::OSDMapBlocker::BlockingEvent&,
const Operation&,
const OSD_OSDMapGate::OSDMapBlocker&) override {
}
void handle(ConnectionPipeline::GetPG::BlockingEvent& ev,
const Operation& op,
const ConnectionPipeline::GetPG& blocker) override {
}
void handle(PGMap::PGCreationBlockingEvent&,
const Operation&,
const PGMap::PGCreationBlocker&) override {
}
void handle(ClientRequest::PGPipeline::AwaitMap::BlockingEvent& ev,
const Operation& op,
const ClientRequest::PGPipeline::AwaitMap& blocker) override {
}
void handle(PG_OSDMapGate::OSDMapBlocker::BlockingEvent&,
const Operation&,
const PG_OSDMapGate::OSDMapBlocker&) override {
}
void handle(ClientRequest::PGPipeline::WaitForActive::BlockingEvent& ev,
const Operation& op,
const ClientRequest::PGPipeline::WaitForActive& blocker) override {
}
void handle(PGActivationBlocker::BlockingEvent& ev,
const Operation& op,
const PGActivationBlocker& blocker) override {
}
void handle(ClientRequest::PGPipeline::RecoverMissing::BlockingEvent& ev,
const Operation& op,
const ClientRequest::PGPipeline::RecoverMissing& blocker) override {
}
void handle(ClientRequest::PGPipeline::GetOBC::BlockingEvent& ev,
const Operation& op,
const ClientRequest::PGPipeline::GetOBC& blocker) override {
}
void handle(ClientRequest::PGPipeline::Process::BlockingEvent& ev,
const Operation& op,
const ClientRequest::PGPipeline::Process& blocker) override {
}
void handle(ClientRequest::PGPipeline::WaitRepop::BlockingEvent& ev,
const Operation& op,
const ClientRequest::PGPipeline::WaitRepop& blocker) override {
}
void handle(ClientRequest::PGPipeline::WaitRepop::BlockingEvent::ExitBarrierEvent& ev,
const Operation& op) override {
}
void handle(ClientRequest::PGPipeline::SendReply::BlockingEvent& ev,
const Operation& op,
const ClientRequest::PGPipeline::SendReply& blocker) override {
}
void handle(ClientRequest::CompletionEvent&,
const Operation&) override {}
};
struct HistoricBackend
: ClientRequest::StartEvent::Backend,
ConnectionPipeline::AwaitActive::BlockingEvent::Backend,
ConnectionPipeline::AwaitMap::BlockingEvent::Backend,
ConnectionPipeline::GetPG::BlockingEvent::Backend,
OSD_OSDMapGate::OSDMapBlocker::BlockingEvent::Backend,
PGMap::PGCreationBlockingEvent::Backend,
ClientRequest::PGPipeline::AwaitMap::BlockingEvent::Backend,
PG_OSDMapGate::OSDMapBlocker::BlockingEvent::Backend,
ClientRequest::PGPipeline::WaitForActive::BlockingEvent::Backend,
PGActivationBlocker::BlockingEvent::Backend,
ClientRequest::PGPipeline::RecoverMissing::BlockingEvent::Backend,
ClientRequest::PGPipeline::GetOBC::BlockingEvent::Backend,
ClientRequest::PGPipeline::Process::BlockingEvent::Backend,
ClientRequest::PGPipeline::WaitRepop::BlockingEvent::Backend,
ClientRequest::PGPipeline::WaitRepop::BlockingEvent::ExitBarrierEvent::Backend,
ClientRequest::PGPipeline::SendReply::BlockingEvent::Backend,
ClientRequest::CompletionEvent::Backend
{
void handle(ClientRequest::StartEvent&,
const Operation&) override {}
void handle(ConnectionPipeline::AwaitActive::BlockingEvent& ev,
const Operation& op,
const ConnectionPipeline::AwaitActive& blocker) override {
}
void handle(ConnectionPipeline::AwaitMap::BlockingEvent& ev,
const Operation& op,
const ConnectionPipeline::AwaitMap& blocker) override {
}
void handle(OSD_OSDMapGate::OSDMapBlocker::BlockingEvent&,
const Operation&,
const OSD_OSDMapGate::OSDMapBlocker&) override {
}
void handle(ConnectionPipeline::GetPG::BlockingEvent& ev,
const Operation& op,
const ConnectionPipeline::GetPG& blocker) override {
}
void handle(PGMap::PGCreationBlockingEvent&,
const Operation&,
const PGMap::PGCreationBlocker&) override {
}
void handle(ClientRequest::PGPipeline::AwaitMap::BlockingEvent& ev,
const Operation& op,
const ClientRequest::PGPipeline::AwaitMap& blocker) override {
}
void handle(PG_OSDMapGate::OSDMapBlocker::BlockingEvent&,
const Operation&,
const PG_OSDMapGate::OSDMapBlocker&) override {
}
void handle(ClientRequest::PGPipeline::WaitForActive::BlockingEvent& ev,
const Operation& op,
const ClientRequest::PGPipeline::WaitForActive& blocker) override {
}
void handle(PGActivationBlocker::BlockingEvent& ev,
const Operation& op,
const PGActivationBlocker& blocker) override {
}
void handle(ClientRequest::PGPipeline::RecoverMissing::BlockingEvent& ev,
const Operation& op,
const ClientRequest::PGPipeline::RecoverMissing& blocker) override {
}
void handle(ClientRequest::PGPipeline::GetOBC::BlockingEvent& ev,
const Operation& op,
const ClientRequest::PGPipeline::GetOBC& blocker) override {
}
void handle(ClientRequest::PGPipeline::Process::BlockingEvent& ev,
const Operation& op,
const ClientRequest::PGPipeline::Process& blocker) override {
}
void handle(ClientRequest::PGPipeline::WaitRepop::BlockingEvent& ev,
const Operation& op,
const ClientRequest::PGPipeline::WaitRepop& blocker) override {
}
void handle(ClientRequest::PGPipeline::WaitRepop::BlockingEvent::ExitBarrierEvent& ev,
const Operation& op) override {
}
void handle(ClientRequest::PGPipeline::SendReply::BlockingEvent& ev,
const Operation& op,
const ClientRequest::PGPipeline::SendReply& blocker) override {
}
static const ClientRequest& to_client_request(const Operation& op) {
#ifdef NDEBUG
return static_cast<const ClientRequest&>(op);
#else
return dynamic_cast<const ClientRequest&>(op);
#endif
}
void handle(ClientRequest::CompletionEvent&, const Operation& op) override {
if (crimson::common::local_conf()->osd_op_history_size) {
to_client_request(op).put_historic();
}
}
};
} // namespace crimson::osd
namespace crimson {
template <>
struct EventBackendRegistry<osd::ClientRequest> {
static std::tuple<osd::LttngBackend, osd::HistoricBackend> get_backends() {
return { {}, {} };
}
};
template <>
struct EventBackendRegistry<osd::RemotePeeringEvent> {
static std::tuple<> get_backends() {
    return {/* no external backends */};
}
};
template <>
struct EventBackendRegistry<osd::LocalPeeringEvent> {
static std::tuple<> get_backends() {
    return {/* no external backends */};
}
};
template <>
struct EventBackendRegistry<osd::RepRequest> {
static std::tuple<> get_backends() {
    return {/* no external backends */};
}
};
template <>
struct EventBackendRegistry<osd::LogMissingRequest> {
static std::tuple<> get_backends() {
    return {/* no external backends */};
}
};
template <>
struct EventBackendRegistry<osd::LogMissingRequestReply> {
static std::tuple<> get_backends() {
    return {/* no external backends */};
}
};
template <>
struct EventBackendRegistry<osd::RecoverySubRequest> {
static std::tuple<> get_backends() {
    return {/* no external backends */};
}
};
template <>
struct EventBackendRegistry<osd::BackfillRecovery> {
static std::tuple<> get_backends() {
return {};
}
};
template <>
struct EventBackendRegistry<osd::PGAdvanceMap> {
static std::tuple<> get_backends() {
return {};
}
};
template <>
struct EventBackendRegistry<osd::SnapTrimObjSubEvent> {
static std::tuple<> get_backends() {
return {};
}
};
} // namespace crimson
// ceph-main/src/crimson/osd/osdmap_gate.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <functional>
#include <map>
#include <optional>
#include <seastar/core/future.hh>
#include <seastar/core/shared_future.hh>
#include "include/types.h"
#include "crimson/osd/osd_operation.h"
namespace ceph {
class Formatter;
}
namespace crimson::osd {
class ShardServices;
enum class OSDMapGateType {
OSD,
PG,
};
template <OSDMapGateType OSDMapGateTypeV>
class OSDMapGate {
public:
struct OSDMapBlocker : BlockerT<OSDMapBlocker> {
const char * type_name;
epoch_t epoch;
OSDMapBlocker(std::pair<const char *, epoch_t> args)
: type_name(args.first), epoch(args.second) {}
OSDMapBlocker(const OSDMapBlocker &) = delete;
OSDMapBlocker(OSDMapBlocker &&) = delete;
OSDMapBlocker &operator=(const OSDMapBlocker &) = delete;
OSDMapBlocker &operator=(OSDMapBlocker &&) = delete;
seastar::shared_promise<epoch_t> promise;
void dump_detail(Formatter *f) const final;
};
using Blocker = OSDMapBlocker;
private:
// order the promises in ascending order of the waited osdmap epoch,
// so we can access all the waiters expecting a map whose epoch is less
// than or equal to a given epoch
using waiting_peering_t = std::map<epoch_t,
OSDMapBlocker>;
const char *blocker_type;
waiting_peering_t waiting_peering;
epoch_t current = 0;
bool stopping = false;
public:
OSDMapGate(const char *blocker_type)
: blocker_type(blocker_type) {}
/**
* wait_for_map
*
   * Wait for an osdmap whose epoch is greater than or equal to the given
   * epoch. If shard_services is non-null, request the map if not present.
*/
seastar::future<epoch_t>
wait_for_map(
typename OSDMapBlocker::BlockingEvent::TriggerI&& trigger,
epoch_t epoch,
ShardServices *shard_services=nullptr
);
void got_map(epoch_t epoch);
seastar::future<> stop();
};
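// Illustrative sketch (not part of the original header): an operation that
// must not run against a stale map blocks on the gate before proceeding;
// the trigger, message and shard_services names below are assumptions:
//
//   return osdmap_gate.wait_for_map(
//     std::move(trigger), m->get_min_epoch(), &shard_services
//   ).then([] (epoch_t ready_epoch) {
//     // safe to proceed: the local osdmap epoch is now >= ready_epoch
//   });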
using OSD_OSDMapGate = OSDMapGate<OSDMapGateType::OSD>;
using PG_OSDMapGate = OSDMapGate<OSDMapGateType::PG>;
}
// ceph-main/src/crimson/osd/osdmap_service.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "include/types.h"
#include "osd/OSDMap.h"
class OSDMap;
class OSDMapService {
public:
using cached_map_t = OSDMapRef;
using local_cached_map_t = LocalOSDMapRef;
virtual ~OSDMapService() = default;
virtual seastar::future<cached_map_t> get_map(epoch_t e) = 0;
/// get the latest map
virtual cached_map_t get_map() const = 0;
virtual epoch_t get_up_epoch() const = 0;
};
// ceph-main/src/crimson/osd/pg.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <memory>
#include <optional>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include <seastar/core/shared_future.hh>
#include "common/dout.h"
#include "include/interval_set.h"
#include "crimson/net/Fwd.h"
#include "messages/MOSDRepOpReply.h"
#include "messages/MOSDOpReply.h"
#include "os/Transaction.h"
#include "osd/osd_types.h"
#include "osd/osd_types_fmt.h"
#include "crimson/osd/object_context.h"
#include "osd/PeeringState.h"
#include "osd/SnapMapper.h"
#include "crimson/common/interruptible_future.h"
#include "crimson/common/type_helpers.h"
#include "crimson/os/futurized_collection.h"
#include "crimson/osd/backfill_state.h"
#include "crimson/osd/pg_interval_interrupt_condition.h"
#include "crimson/osd/ops_executer.h"
#include "crimson/osd/osd_operations/client_request.h"
#include "crimson/osd/osd_operations/logmissing_request.h"
#include "crimson/osd/osd_operations/logmissing_request_reply.h"
#include "crimson/osd/osd_operations/peering_event.h"
#include "crimson/osd/osd_operations/replicated_request.h"
#include "crimson/osd/shard_services.h"
#include "crimson/osd/osdmap_gate.h"
#include "crimson/osd/pg_activation_blocker.h"
#include "crimson/osd/pg_recovery.h"
#include "crimson/osd/pg_recovery_listener.h"
#include "crimson/osd/recovery_backend.h"
#include "crimson/osd/object_context_loader.h"
class MQuery;
class OSDMap;
class PGBackend;
class PGPeeringEvent;
class osd_op_params_t;
namespace recovery {
class Context;
}
namespace crimson::net {
class Messenger;
}
namespace crimson::os {
class FuturizedStore;
}
namespace crimson::osd {
class OpsExecuter;
class BackfillRecovery;
class PG : public boost::intrusive_ref_counter<
PG,
boost::thread_unsafe_counter>,
public PGRecoveryListener,
PeeringState::PeeringListener,
DoutPrefixProvider
{
using ec_profile_t = std::map<std::string,std::string>;
using cached_map_t = OSDMapService::cached_map_t;
ClientRequest::PGPipeline request_pg_pipeline;
PGPeeringPipeline peering_request_pg_pipeline;
ClientRequest::Orderer client_request_orderer;
spg_t pgid;
pg_shard_t pg_whoami;
crimson::os::CollectionRef coll_ref;
ghobject_t pgmeta_oid;
seastar::timer<seastar::lowres_clock> check_readable_timer;
seastar::timer<seastar::lowres_clock> renew_lease_timer;
public:
template <typename T = void>
using interruptible_future =
::crimson::interruptible::interruptible_future<
::crimson::osd::IOInterruptCondition, T>;
PG(spg_t pgid,
pg_shard_t pg_shard,
crimson::os::CollectionRef coll_ref,
pg_pool_t&& pool,
std::string&& name,
cached_map_t osdmap,
ShardServices &shard_services,
ec_profile_t profile);
~PG();
const pg_shard_t& get_pg_whoami() const final {
return pg_whoami;
}
const spg_t& get_pgid() const final {
return pgid;
}
PGBackend& get_backend() {
return *backend;
}
const PGBackend& get_backend() const {
return *backend;
}
// EpochSource
epoch_t get_osdmap_epoch() const final {
return peering_state.get_osdmap_epoch();
}
eversion_t get_pg_trim_to() const {
return peering_state.get_pg_trim_to();
}
eversion_t get_min_last_complete_ondisk() const {
return peering_state.get_min_last_complete_ondisk();
}
const pg_info_t& get_info() const final {
return peering_state.get_info();
}
// DoutPrefixProvider
std::ostream& gen_prefix(std::ostream& out) const final {
return out << *this;
}
crimson::common::CephContext *get_cct() const final {
return shard_services.get_cct();
}
unsigned get_subsys() const final {
return ceph_subsys_osd;
}
crimson::os::CollectionRef get_collection_ref() {
return coll_ref;
}
// PeeringListener
void prepare_write(
pg_info_t &info,
pg_info_t &last_written_info,
PastIntervals &past_intervals,
PGLog &pglog,
bool dirty_info,
bool dirty_big_info,
bool need_write_epoch,
ceph::os::Transaction &t) final;
void scrub_requested(scrub_level_t scrub_level, scrub_type_t scrub_type) final;
uint64_t get_snap_trimq_size() const final {
return std::size(snap_trimq);
}
void send_cluster_message(
int osd, MessageURef m,
epoch_t epoch, bool share_map_update=false) final {
(void)shard_services.send_to_osd(osd, std::move(m), epoch);
}
void send_pg_created(pg_t pgid) final {
(void)shard_services.send_pg_created(pgid);
}
bool try_flush_or_schedule_async() final;
void start_flush_on_transaction(
ceph::os::Transaction &t) final {
t.register_on_commit(
new LambdaContext([this](int r){
peering_state.complete_flush();
}));
}
void on_flushed() final {
// will be needed for unblocking IO operations/peering
}
template <typename T>
void start_peering_event_operation(T &&evt, float delay = 0) {
(void) shard_services.start_operation<LocalPeeringEvent>(
this,
pg_whoami,
pgid,
delay,
std::forward<T>(evt));
}
void schedule_event_after(
PGPeeringEventRef event,
float delay) final {
start_peering_event_operation(std::move(*event), delay);
}
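  // Illustrative note (not part of the original header): the reservation and
  // on-commit callbacks below all funnel into start_peering_event_operation(),
  // which wraps the event in a LocalPeeringEvent and hands it to
  // shard_services; schedule_event_after() above, for instance, reduces to
  //
  //   start_peering_event_operation(std::move(*event), delay);
  //
  // with the delay applied before the event is delivered to the PG.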
std::vector<pg_shard_t> get_replica_recovery_order() const final {
return peering_state.get_replica_recovery_order();
}
void request_local_background_io_reservation(
unsigned priority,
PGPeeringEventURef on_grant,
PGPeeringEventURef on_preempt) final {
// TODO -- we probably want to add a mechanism for blocking on this
// after handling the peering event
std::ignore = shard_services.local_request_reservation(
pgid,
on_grant ? make_lambda_context([this, on_grant=std::move(on_grant)] (int) {
start_peering_event_operation(std::move(*on_grant));
}) : nullptr,
priority,
on_preempt ? make_lambda_context(
[this, on_preempt=std::move(on_preempt)] (int) {
start_peering_event_operation(std::move(*on_preempt));
}) : nullptr);
}
void update_local_background_io_priority(
unsigned priority) final {
// TODO -- we probably want to add a mechanism for blocking on this
// after handling the peering event
std::ignore = shard_services.local_update_priority(
pgid,
priority);
}
void cancel_local_background_io_reservation() final {
// TODO -- we probably want to add a mechanism for blocking on this
// after handling the peering event
std::ignore = shard_services.local_cancel_reservation(
pgid);
}
void request_remote_recovery_reservation(
unsigned priority,
PGPeeringEventURef on_grant,
PGPeeringEventURef on_preempt) final {
// TODO -- we probably want to add a mechanism for blocking on this
// after handling the peering event
std::ignore = shard_services.remote_request_reservation(
pgid,
on_grant ? make_lambda_context([this, on_grant=std::move(on_grant)] (int) {
start_peering_event_operation(std::move(*on_grant));
}) : nullptr,
priority,
on_preempt ? make_lambda_context(
[this, on_preempt=std::move(on_preempt)] (int) {
start_peering_event_operation(std::move(*on_preempt));
}) : nullptr);
}
void cancel_remote_recovery_reservation() final {
// TODO -- we probably want to add a mechanism for blocking on this
// after handling the peering event
std::ignore = shard_services.remote_cancel_reservation(
pgid);
}
void schedule_event_on_commit(
ceph::os::Transaction &t,
PGPeeringEventRef on_commit) final {
t.register_on_commit(
make_lambda_context(
[this, on_commit=std::move(on_commit)](int) {
start_peering_event_operation(std::move(*on_commit));
}));
}
void update_heartbeat_peers(std::set<int> peers) final {
// Not needed yet
}
void set_probe_targets(const std::set<pg_shard_t> &probe_set) final {
// Not needed yet
}
void clear_probe_targets() final {
// Not needed yet
}
void queue_want_pg_temp(const std::vector<int> &wanted) final {
// TODO -- we probably want to add a mechanism for blocking on this
// after handling the peering event
std::ignore = shard_services.queue_want_pg_temp(pgid.pgid, wanted);
}
void clear_want_pg_temp() final {
// TODO -- we probably want to add a mechanism for blocking on this
// after handling the peering event
std::ignore = shard_services.remove_want_pg_temp(pgid.pgid);
}
void check_recovery_sources(const OSDMapRef& newmap) final {
// Not needed yet
}
void check_blocklisted_watchers() final;
void clear_primary_state() final {
// Not needed yet
}
void queue_check_readable(epoch_t last_peering_reset,
ceph::timespan delay) final;
void recheck_readable() final;
unsigned get_target_pg_log_entries() const final;
void on_pool_change() final {
// Not needed yet
}
void on_role_change() final {
// Not needed yet
}
void on_change(ceph::os::Transaction &t) final;
void on_activate(interval_set<snapid_t> to_trim) final;
void on_activate_complete() final;
void on_new_interval() final {
// Not needed yet
}
Context *on_clean() final;
void on_activate_committed() final {
// Not needed yet (will be needed for IO unblocking)
}
void on_active_exit() final {
// Not needed yet
}
void on_removal(ceph::os::Transaction &t) final;
std::pair<ghobject_t, bool>
do_delete_work(ceph::os::Transaction &t, ghobject_t _next) final;
// merge/split not ready
void clear_ready_to_merge() final {}
void set_not_ready_to_merge_target(pg_t pgid, pg_t src) final {}
void set_not_ready_to_merge_source(pg_t pgid) final {}
void set_ready_to_merge_target(eversion_t lu, epoch_t les, epoch_t lec) final {}
void set_ready_to_merge_source(eversion_t lu) final {}
void on_active_actmap() final;
void on_active_advmap(const OSDMapRef &osdmap) final;
epoch_t cluster_osdmap_trim_lower_bound() final {
// TODO
return 0;
}
void on_backfill_reserved() final {
recovery_handler->on_backfill_reserved();
}
void on_backfill_canceled() final {
ceph_assert(0 == "Not implemented");
}
void on_recovery_reserved() final {
recovery_handler->start_pglogbased_recovery();
}
bool try_reserve_recovery_space(
int64_t primary_num_bytes, int64_t local_num_bytes) final {
// TODO
return true;
}
void unreserve_recovery_space() final {}
struct PGLogEntryHandler : public PGLog::LogEntryHandler {
PG *pg;
ceph::os::Transaction *t;
PGLogEntryHandler(PG *pg, ceph::os::Transaction *t) : pg(pg), t(t) {}
// LogEntryHandler
void remove(const hobject_t &hoid) override {
// TODO
}
void try_stash(const hobject_t &hoid, version_t v) override {
// TODO
}
void rollback(const pg_log_entry_t &entry) override {
// TODO
}
void rollforward(const pg_log_entry_t &entry) override {
// TODO
}
void trim(const pg_log_entry_t &entry) override {
// TODO
}
};
PGLog::LogEntryHandlerRef get_log_handler(
ceph::os::Transaction &t) final {
return std::make_unique<PG::PGLogEntryHandler>(this, &t);
}
void rebuild_missing_set_with_deletes(PGLog &pglog) final {
pglog.rebuild_missing_set_with_deletes_crimson(
shard_services.get_store(),
coll_ref,
peering_state.get_info()).get();
}
PerfCounters &get_peering_perf() final {
return shard_services.get_recoverystate_perf_logger();
}
PerfCounters &get_perf_logger() final {
return shard_services.get_perf_logger();
}
void log_state_enter(const char *state) final;
void log_state_exit(
const char *state_name, utime_t enter_time,
uint64_t events, utime_t event_dur) final;
void dump_recovery_info(Formatter *f) const final {
}
OstreamTemp get_clog_info() final {
    // not needed yet: this is a stub until it can be wired up to monc
return OstreamTemp(CLOG_INFO, nullptr);
}
OstreamTemp get_clog_debug() final {
    // not needed yet: this is a stub until it can be wired up to monc
return OstreamTemp(CLOG_DEBUG, nullptr);
}
OstreamTemp get_clog_error() final {
    // not needed yet: this is a stub until it can be wired up to monc
return OstreamTemp(CLOG_ERROR, nullptr);
}
ceph::signedspan get_mnow() const final;
HeartbeatStampsRef get_hb_stamps(int peer) final;
void schedule_renew_lease(epoch_t plr, ceph::timespan delay) final;
// Utility
bool is_primary() const final {
return peering_state.is_primary();
}
bool is_nonprimary() const {
return peering_state.is_nonprimary();
}
bool is_peered() const final {
return peering_state.is_peered();
}
bool is_recovering() const final {
return peering_state.is_recovering();
}
bool is_backfilling() const final {
return peering_state.is_backfilling();
}
uint64_t get_last_user_version() const {
return get_info().last_user_version;
}
bool get_need_up_thru() const {
return peering_state.get_need_up_thru();
}
epoch_t get_same_interval_since() const {
return get_info().history.same_interval_since;
}
const auto& get_pgpool() const {
return peering_state.get_pgpool();
}
pg_shard_t get_primary() const {
return peering_state.get_primary();
}
/// initialize created PG
void init(
int role,
const std::vector<int>& up,
int up_primary,
const std::vector<int>& acting,
int acting_primary,
const pg_history_t& history,
const PastIntervals& pim,
ceph::os::Transaction &t);
seastar::future<> read_state(crimson::os::FuturizedStore::Shard* store);
interruptible_future<> do_peering_event(
PGPeeringEvent& evt, PeeringCtx &rctx);
seastar::future<> handle_advance_map(cached_map_t next_map, PeeringCtx &rctx);
seastar::future<> handle_activate_map(PeeringCtx &rctx);
seastar::future<> handle_initialize(PeeringCtx &rctx);
static hobject_t get_oid(const hobject_t& hobj);
static RWState::State get_lock_type(const OpInfo &op_info);
using load_obc_ertr = crimson::errorator<
crimson::ct_error::enoent,
crimson::ct_error::object_corrupted>;
using load_obc_iertr =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
load_obc_ertr>;
using interruptor = ::crimson::interruptible::interruptor<
::crimson::osd::IOInterruptCondition>;
public:
using with_obc_func_t =
std::function<load_obc_iertr::future<> (ObjectContextRef)>;
load_obc_iertr::future<> with_locked_obc(
const hobject_t &hobj,
const OpInfo &op_info,
with_obc_func_t&& f);
interruptible_future<> handle_rep_op(Ref<MOSDRepOp> m);
void log_operation(
std::vector<pg_log_entry_t>&& logv,
const eversion_t &trim_to,
const eversion_t &roll_forward_to,
const eversion_t &min_last_complete_ondisk,
bool transaction_applied,
ObjectStore::Transaction &txn,
bool async = false);
void replica_clear_repop_obc(
const std::vector<pg_log_entry_t> &logv);
void handle_rep_op_reply(const MOSDRepOpReply& m);
interruptible_future<> do_update_log_missing(
Ref<MOSDPGUpdateLogMissing> m,
crimson::net::ConnectionRef conn);
interruptible_future<> do_update_log_missing_reply(
Ref<MOSDPGUpdateLogMissingReply> m);
void print(std::ostream& os) const;
void dump_primary(Formatter*);
seastar::future<> submit_error_log(
Ref<MOSDOp> m,
const OpInfo &op_info,
ObjectContextRef obc,
const std::error_code e,
ceph_tid_t rep_tid,
eversion_t &version);
private:
using do_osd_ops_ertr = crimson::errorator<
crimson::ct_error::eagain>;
using do_osd_ops_iertr =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
::crimson::errorator<crimson::ct_error::eagain>>;
template <typename Ret = void>
using pg_rep_op_fut_t =
std::tuple<interruptible_future<>,
do_osd_ops_iertr::future<Ret>>;
do_osd_ops_iertr::future<pg_rep_op_fut_t<MURef<MOSDOpReply>>> do_osd_ops(
Ref<MOSDOp> m,
crimson::net::ConnectionRef conn,
ObjectContextRef obc,
const OpInfo &op_info,
const SnapContext& snapc);
using do_osd_ops_success_func_t =
std::function<do_osd_ops_iertr::future<>()>;
using do_osd_ops_failure_func_t =
std::function<do_osd_ops_iertr::future<>(const std::error_code&)>;
struct do_osd_ops_params_t;
do_osd_ops_iertr::future<pg_rep_op_fut_t<>> do_osd_ops(
ObjectContextRef obc,
std::vector<OSDOp>& ops,
const OpInfo &op_info,
const do_osd_ops_params_t &¶ms,
do_osd_ops_success_func_t success_func,
do_osd_ops_failure_func_t failure_func);
template <class Ret, class SuccessFunc, class FailureFunc>
do_osd_ops_iertr::future<pg_rep_op_fut_t<Ret>> do_osd_ops_execute(
seastar::lw_shared_ptr<OpsExecuter> ox,
std::vector<OSDOp>& ops,
SuccessFunc&& success_func,
FailureFunc&& failure_func);
interruptible_future<MURef<MOSDOpReply>> do_pg_ops(Ref<MOSDOp> m);
std::tuple<interruptible_future<>, interruptible_future<>>
submit_transaction(
ObjectContextRef&& obc,
ceph::os::Transaction&& txn,
osd_op_params_t&& oop,
std::vector<pg_log_entry_t>&& log_entries);
interruptible_future<> repair_object(
const hobject_t& oid,
eversion_t& v);
void check_blocklisted_obc_watchers(ObjectContextRef &obc);
private:
PG_OSDMapGate osdmap_gate;
ShardServices &shard_services;
public:
cached_map_t get_osdmap() { return peering_state.get_osdmap(); }
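  // Composes the version stamp for the next update: the current osdmap
  // epoch paired with the next projected (not yet committed) version number.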
eversion_t next_version() {
return eversion_t(get_osdmap_epoch(),
++projected_last_update.version);
}
ShardServices& get_shard_services() final {
return shard_services;
}
seastar::future<> stop();
private:
std::unique_ptr<PGBackend> backend;
std::unique_ptr<RecoveryBackend> recovery_backend;
std::unique_ptr<PGRecovery> recovery_handler;
PeeringState peering_state;
eversion_t projected_last_update;
public:
ObjectContextRegistry obc_registry;
ObjectContextLoader obc_loader;
private:
OSDriver osdriver;
SnapMapper snap_mapper;
public:
// PeeringListener
void publish_stats_to_osd() final;
void clear_publish_stats() final;
pg_stat_t get_stats() const;
private:
std::optional<pg_stat_t> pg_stats;
public:
RecoveryBackend* get_recovery_backend() final {
return recovery_backend.get();
}
PGRecovery* get_recovery_handler() final {
return recovery_handler.get();
}
PeeringState& get_peering_state() final {
return peering_state;
}
bool has_reset_since(epoch_t epoch) const final {
return peering_state.pg_has_reset_since(epoch);
}
const pg_missing_tracker_t& get_local_missing() const {
return peering_state.get_pg_log().get_missing();
}
epoch_t get_last_peering_reset() const final {
return peering_state.get_last_peering_reset();
}
const std::set<pg_shard_t> &get_acting_recovery_backfill() const {
return peering_state.get_acting_recovery_backfill();
}
bool is_backfill_target(pg_shard_t osd) const {
return peering_state.is_backfill_target(osd);
}
void begin_peer_recover(pg_shard_t peer, const hobject_t oid) {
peering_state.begin_peer_recover(peer, oid);
}
uint64_t min_peer_features() const {
return peering_state.get_min_peer_features();
}
const std::map<hobject_t, std::set<pg_shard_t>>&
get_missing_loc_shards() const {
return peering_state.get_missing_loc().get_missing_locs();
}
const std::map<pg_shard_t, pg_missing_t> &get_shard_missing() const {
return peering_state.get_peer_missing();
}
epoch_t get_interval_start_epoch() const {
return get_info().history.same_interval_since;
}
const pg_missing_const_i* get_shard_missing(pg_shard_t shard) const {
if (shard == pg_whoami)
return &get_local_missing();
else {
auto it = peering_state.get_peer_missing().find(shard);
if (it == peering_state.get_peer_missing().end())
return nullptr;
else
return &it->second;
}
}
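  // Outcome of looking a request up in the PG log: the recorded user and
  // internal versions plus the original return code (see already_complete()).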
struct complete_op_t {
const version_t user_version;
const eversion_t version;
const int err;
};
interruptible_future<std::optional<complete_op_t>>
already_complete(const osd_reqid_t& reqid);
int get_recovery_op_priority() const {
int64_t pri = 0;
get_pgpool().info.opts.get(pool_opts_t::RECOVERY_OP_PRIORITY, &pri);
return pri > 0 ? pri : crimson::common::local_conf()->osd_recovery_op_priority;
}
seastar::future<> mark_unfound_lost(int) {
// TODO: see PrimaryLogPG::mark_all_unfound_lost()
return seastar::now();
}
bool old_peering_msg(epoch_t reply_epoch, epoch_t query_epoch) const;
template <typename MsgType>
bool can_discard_replica_op(const MsgType& m) const {
return can_discard_replica_op(m, m.map_epoch);
}
private:
// instead of seastar::gate, we use a boolean flag to indicate
// whether the system is shutting down, as we don't need to track
// continuations here.
bool stopping = false;
PGActivationBlocker wait_for_active_blocker;
friend std::ostream& operator<<(std::ostream&, const PG& pg);
friend class ClientRequest;
friend struct CommonClientRequest;
friend class PGAdvanceMap;
template <class T>
friend class PeeringEvent;
friend class RepRequest;
friend class LogMissingRequest;
friend class LogMissingRequestReply;
friend class BackfillRecovery;
friend struct PGFacade;
friend class InternalClientRequest;
friend class WatchTimeoutRequest;
friend class SnapTrimEvent;
friend class SnapTrimObjSubEvent;
private:
seastar::future<bool> find_unfound() {
return seastar::make_ready_future<bool>(true);
}
bool can_discard_replica_op(const Message& m, epoch_t m_map_epoch) const;
bool can_discard_op(const MOSDOp& m) const;
void context_registry_on_change();
bool is_missing_object(const hobject_t& soid) const {
return peering_state.get_pg_log().get_missing().get_items().count(soid);
}
bool is_unreadable_object(const hobject_t &oid,
eversion_t* v = 0) const final {
return is_missing_object(oid) ||
!peering_state.get_missing_loc().readable_with_acting(
oid, get_actingset(), v);
}
bool is_degraded_or_backfilling_object(const hobject_t& soid) const;
const std::set<pg_shard_t> &get_actingset() const {
return peering_state.get_actingset();
}
private:
friend class IOInterruptCondition;
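  // Replica shards still outstanding for an in-flight log update (keyed by
  // tid in the map below); all_committed resolves once waiting_on drains.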
struct log_update_t {
std::set<pg_shard_t> waiting_on;
seastar::shared_promise<> all_committed;
};
std::map<ceph_tid_t, log_update_t> log_entry_update_waiting_on;
// snap trimming
interval_set<snapid_t> snap_trimq;
};
struct PG::do_osd_ops_params_t {
crimson::net::ConnectionRef &get_connection() const {
return conn;
}
osd_reqid_t get_reqid() const {
return reqid;
}
utime_t get_mtime() const {
return mtime;
  }
epoch_t get_map_epoch() const {
return map_epoch;
}
entity_inst_t get_orig_source_inst() const {
return orig_source_inst;
}
uint64_t get_features() const {
return features;
}
// Only used by InternalClientRequest, no op flags
bool has_flag(uint32_t flag) const {
return false;
}
// Only used by ExecutableMessagePimpl
entity_name_t get_source() const {
return orig_source_inst.name;
}
crimson::net::ConnectionRef &conn;
osd_reqid_t reqid;
utime_t mtime;
epoch_t map_epoch;
entity_inst_t orig_source_inst;
uint64_t features;
};
std::ostream& operator<<(std::ostream&, const PG& pg);
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::osd::PG> : fmt::ostream_formatter {};
#endif
| 23,713 | 28.385378 | 84 | h |
null | ceph-main/src/crimson/osd/pg_activation_blocker.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <seastar/core/future.hh>
#include <seastar/core/shared_future.hh>
#include "crimson/common/operation.h"
#include "crimson/osd/osd_operation.h"
namespace crimson::osd {
class PG;
class PGActivationBlocker : public crimson::BlockerT<PGActivationBlocker> {
PG *pg;
const spg_t pgid;
seastar::shared_promise<> p;
protected:
void dump_detail(Formatter *f) const;
public:
static constexpr const char *type_name = "PGActivationBlocker";
using Blocker = PGActivationBlocker;
PGActivationBlocker(PG *pg) : pg(pg) {}
void unblock();
seastar::future<> wait(PGActivationBlocker::BlockingEvent::TriggerI&&);
seastar::future<> stop();
};
} // namespace crimson::osd
| 815 | 21.666667 | 75 | h |
null | ceph-main/src/crimson/osd/pg_backend.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <map>
#include <memory>
#include <string>
#include <boost/container/flat_set.hpp>
#include "include/rados.h"
#include "crimson/os/futurized_store.h"
#include "crimson/os/futurized_collection.h"
#include "crimson/osd/acked_peers.h"
#include "crimson/common/shared_lru.h"
#include "messages/MOSDOp.h"
#include "messages/MOSDOpReply.h"
#include "os/Transaction.h"
#include "osd/osd_types.h"
#include "crimson/osd/object_context.h"
#include "crimson/osd/osd_operation.h"
#include "crimson/osd/osd_operations/osdop_params.h"
struct hobject_t;
namespace ceph::os {
class Transaction;
}
namespace crimson::osd {
class ShardServices;
class PG;
class ObjectContextLoader;
}
class PGBackend
{
protected:
using CollectionRef = crimson::os::CollectionRef;
using ec_profile_t = std::map<std::string, std::string>;
// low-level read errorator
using ll_read_errorator = crimson::os::FuturizedStore::Shard::read_errorator;
using ll_read_ierrorator =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
ll_read_errorator>;
public:
using load_metadata_ertr = crimson::errorator<
crimson::ct_error::object_corrupted>;
using load_metadata_iertr =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
load_metadata_ertr>;
using interruptor =
::crimson::interruptible::interruptor<
::crimson::osd::IOInterruptCondition>;
template <typename T = void>
using interruptible_future =
::crimson::interruptible::interruptible_future<
::crimson::osd::IOInterruptCondition, T>;
using rep_op_fut_t =
std::tuple<interruptible_future<>,
interruptible_future<crimson::osd::acked_peers_t>>;
PGBackend(shard_id_t shard, CollectionRef coll,
crimson::osd::ShardServices &shard_services,
DoutPrefixProvider &dpp);
virtual ~PGBackend() = default;
static std::unique_ptr<PGBackend> create(pg_t pgid,
const pg_shard_t pg_shard,
const pg_pool_t& pool,
crimson::os::CollectionRef coll,
crimson::osd::ShardServices& shard_services,
const ec_profile_t& ec_profile,
DoutPrefixProvider &dpp);
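  // The factory above selects the concrete backend implementation for the
  // pool type (e.g. replicated vs erasure-coded) and binds it to the given
  // collection and shard services; see the definition for the exact dispatch.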
using attrs_t =
std::map<std::string, ceph::bufferptr, std::less<>>;
using read_errorator = ll_read_errorator::extend<
crimson::ct_error::object_corrupted>;
using read_ierrorator =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
read_errorator>;
read_ierrorator::future<> read(
const ObjectState& os,
OSDOp& osd_op,
object_stat_sum_t& delta_stats);
read_ierrorator::future<> sparse_read(
const ObjectState& os,
OSDOp& osd_op,
object_stat_sum_t& delta_stats);
using checksum_errorator = ll_read_errorator::extend<
crimson::ct_error::object_corrupted,
crimson::ct_error::invarg>;
using checksum_ierrorator =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
checksum_errorator>;
checksum_ierrorator::future<> checksum(
const ObjectState& os,
OSDOp& osd_op);
using cmp_ext_errorator = ll_read_errorator::extend<
crimson::ct_error::invarg,
crimson::ct_error::cmp_fail>;
using cmp_ext_ierrorator =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
cmp_ext_errorator>;
cmp_ext_ierrorator::future<> cmp_ext(
const ObjectState& os,
OSDOp& osd_op);
using stat_errorator = crimson::errorator<crimson::ct_error::enoent>;
using stat_ierrorator =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
stat_errorator>;
stat_ierrorator::future<> stat(
const ObjectState& os,
OSDOp& osd_op,
object_stat_sum_t& delta_stats);
// TODO: switch the entire write family to errorator.
using write_ertr = crimson::errorator<
crimson::ct_error::file_too_large,
crimson::ct_error::invarg>;
using write_iertr =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
write_ertr>;
using create_ertr = crimson::errorator<
crimson::ct_error::invarg,
crimson::ct_error::eexist>;
using create_iertr =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
create_ertr>;
create_iertr::future<> create(
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& trans,
object_stat_sum_t& delta_stats);
using remove_ertr = crimson::errorator<
crimson::ct_error::enoent>;
using remove_iertr =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
remove_ertr>;
remove_iertr::future<> remove(
ObjectState& os,
ceph::os::Transaction& txn,
object_stat_sum_t& delta_stats,
bool whiteout);
interruptible_future<> remove(
ObjectState& os,
ceph::os::Transaction& txn);
interruptible_future<> set_allochint(
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& trans,
object_stat_sum_t& delta_stats);
write_iertr::future<> write(
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& trans,
osd_op_params_t& osd_op_params,
object_stat_sum_t& delta_stats);
interruptible_future<> write_same(
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& trans,
osd_op_params_t& osd_op_params,
object_stat_sum_t& delta_stats);
write_iertr::future<> writefull(
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& trans,
osd_op_params_t& osd_op_params,
object_stat_sum_t& delta_stats);
using append_errorator = crimson::errorator<
crimson::ct_error::invarg>;
using append_ierrorator =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
append_errorator>;
append_ierrorator::future<> append(
ObjectState& os,
OSDOp& osd_op,
ceph::os::Transaction& trans,
osd_op_params_t& osd_op_params,
object_stat_sum_t& delta_stats);
using rollback_ertr = crimson::errorator<
crimson::ct_error::enoent>;
using rollback_iertr =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
rollback_ertr>;
rollback_iertr::future<> rollback(
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& txn,
osd_op_params_t& osd_op_params,
object_stat_sum_t& delta_stats,
crimson::osd::ObjectContextRef head,
crimson::osd::ObjectContextLoader& obc_loader);
write_iertr::future<> truncate(
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& trans,
osd_op_params_t& osd_op_params,
object_stat_sum_t& delta_stats);
write_iertr::future<> zero(
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& trans,
osd_op_params_t& osd_op_params,
object_stat_sum_t& delta_stats);
rep_op_fut_t mutate_object(
std::set<pg_shard_t> pg_shards,
crimson::osd::ObjectContextRef &&obc,
ceph::os::Transaction&& txn,
osd_op_params_t&& osd_op_p,
epoch_t min_epoch,
epoch_t map_epoch,
std::vector<pg_log_entry_t>&& log_entries);
interruptible_future<std::tuple<std::vector<hobject_t>, hobject_t>> list_objects(
const hobject_t& start,
uint64_t limit) const;
using setxattr_errorator = crimson::errorator<
crimson::ct_error::file_too_large,
crimson::ct_error::enametoolong>;
using setxattr_ierrorator =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
setxattr_errorator>;
setxattr_ierrorator::future<> setxattr(
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& trans,
object_stat_sum_t& delta_stats);
using get_attr_errorator = crimson::os::FuturizedStore::Shard::get_attr_errorator;
using get_attr_ierrorator =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
get_attr_errorator>;
get_attr_ierrorator::future<> getxattr(
const ObjectState& os,
OSDOp& osd_op,
object_stat_sum_t& delta_stats) const;
get_attr_ierrorator::future<ceph::bufferlist> getxattr(
const hobject_t& soid,
std::string_view key) const;
get_attr_ierrorator::future<ceph::bufferlist> getxattr(
const hobject_t& soid,
std::string&& key) const;
get_attr_ierrorator::future<> get_xattrs(
const ObjectState& os,
OSDOp& osd_op,
object_stat_sum_t& delta_stats) const;
using cmp_xattr_errorator = get_attr_errorator::extend<
crimson::ct_error::ecanceled,
crimson::ct_error::invarg>;
using cmp_xattr_ierrorator =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
cmp_xattr_errorator>;
cmp_xattr_ierrorator::future<> cmp_xattr(
const ObjectState& os,
OSDOp& osd_op,
object_stat_sum_t& delta_stats) const;
using rm_xattr_ertr = crimson::errorator<crimson::ct_error::enoent>;
using rm_xattr_iertr =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
rm_xattr_ertr>;
rm_xattr_iertr::future<> rm_xattr(
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& trans);
void clone(
/* const */object_info_t& snap_oi,
const ObjectState& os,
const ObjectState& d_os,
ceph::os::Transaction& trans);
interruptible_future<struct stat> stat(
CollectionRef c,
const ghobject_t& oid) const;
read_errorator::future<std::map<uint64_t, uint64_t>> fiemap(
CollectionRef c,
const ghobject_t& oid,
uint64_t off,
uint64_t len);
write_iertr::future<> tmapput(
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& trans,
object_stat_sum_t& delta_stats,
osd_op_params_t& osd_op_params);
using tmapup_ertr = write_ertr::extend<
crimson::ct_error::enoent,
crimson::ct_error::eexist>;
using tmapup_iertr = ::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
tmapup_ertr>;
tmapup_iertr::future<> tmapup(
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& trans,
object_stat_sum_t& delta_stats,
osd_op_params_t& osd_op_params);
read_ierrorator::future<> tmapget(
const ObjectState& os,
OSDOp& osd_op,
object_stat_sum_t& delta_stats);
// OMAP
ll_read_ierrorator::future<> omap_get_keys(
const ObjectState& os,
OSDOp& osd_op,
object_stat_sum_t& delta_stats) const;
using omap_cmp_ertr =
crimson::os::FuturizedStore::Shard::read_errorator::extend<
crimson::ct_error::ecanceled,
crimson::ct_error::invarg>;
using omap_cmp_iertr =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
omap_cmp_ertr>;
omap_cmp_iertr::future<> omap_cmp(
const ObjectState& os,
OSDOp& osd_op,
object_stat_sum_t& delta_stats) const;
ll_read_ierrorator::future<> omap_get_vals(
const ObjectState& os,
OSDOp& osd_op,
object_stat_sum_t& delta_stats) const;
ll_read_ierrorator::future<> omap_get_vals_by_keys(
const ObjectState& os,
OSDOp& osd_op,
object_stat_sum_t& delta_stats) const;
interruptible_future<> omap_set_vals(
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& trans,
osd_op_params_t& osd_op_params,
object_stat_sum_t& delta_stats);
ll_read_ierrorator::future<ceph::bufferlist> omap_get_header(
const crimson::os::CollectionRef& c,
const ghobject_t& oid) const;
ll_read_ierrorator::future<> omap_get_header(
const ObjectState& os,
OSDOp& osd_op,
object_stat_sum_t& delta_stats) const;
interruptible_future<> omap_set_header(
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& trans,
osd_op_params_t& osd_op_params,
object_stat_sum_t& delta_stats);
interruptible_future<> omap_remove_range(
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& trans,
object_stat_sum_t& delta_stats);
interruptible_future<> omap_remove_key(
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& trans);
using omap_clear_ertr = crimson::errorator<crimson::ct_error::enoent>;
using omap_clear_iertr =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
omap_clear_ertr>;
omap_clear_iertr::future<> omap_clear(
ObjectState& os,
OSDOp& osd_op,
ceph::os::Transaction& trans,
osd_op_params_t& osd_op_params,
object_stat_sum_t& delta_stats);
virtual void got_rep_op_reply(const MOSDRepOpReply&) {}
virtual seastar::future<> stop() = 0;
virtual void on_actingset_changed(bool same_primary) = 0;
protected:
const shard_id_t shard;
CollectionRef coll;
crimson::osd::ShardServices &shard_services;
DoutPrefixProvider &dpp; ///< provides log prefix context
crimson::os::FuturizedStore::Shard* store;
virtual seastar::future<> request_committed(
const osd_reqid_t& reqid,
const eversion_t& at_version) = 0;
public:
struct loaded_object_md_t {
ObjectState os;
crimson::osd::SnapSetContextRef ssc;
using ref = std::unique_ptr<loaded_object_md_t>;
};
load_metadata_iertr::future<loaded_object_md_t::ref>
load_metadata(
const hobject_t &oid);
private:
virtual ll_read_ierrorator::future<ceph::bufferlist> _read(
const hobject_t& hoid,
size_t offset,
size_t length,
uint32_t flags) = 0;
write_iertr::future<> _writefull(
ObjectState& os,
off_t truncate_size,
const bufferlist& bl,
ceph::os::Transaction& txn,
osd_op_params_t& osd_op_params,
object_stat_sum_t& delta_stats,
unsigned flags);
write_iertr::future<> _truncate(
ObjectState& os,
ceph::os::Transaction& txn,
osd_op_params_t& osd_op_params,
object_stat_sum_t& delta_stats,
size_t offset,
size_t truncate_size,
uint32_t truncate_seq);
bool maybe_create_new_object(ObjectState& os,
ceph::os::Transaction& txn,
object_stat_sum_t& delta_stats);
void update_size_and_usage(object_stat_sum_t& delta_stats,
object_info_t& oi, uint64_t offset,
uint64_t length, bool write_full = false);
void truncate_update_size_and_usage(
object_stat_sum_t& delta_stats,
object_info_t& oi,
uint64_t truncate_size);
virtual rep_op_fut_t
_submit_transaction(std::set<pg_shard_t>&& pg_shards,
const hobject_t& hoid,
ceph::os::Transaction&& txn,
osd_op_params_t&& osd_op_p,
epoch_t min_epoch, epoch_t max_epoch,
std::vector<pg_log_entry_t>&& log_entries) = 0;
friend class ReplicatedRecoveryBackend;
friend class ::crimson::osd::PG;
};
| 14,909 | 32.207127 | 84 | h |
null | ceph-main/src/crimson/osd/pg_interval_interrupt_condition.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include "include/types.h"
#include "crimson/common/errorator.h"
#include "crimson/common/exception.h"
#include "crimson/common/type_helpers.h"
namespace crimson::osd {
class PG;
class IOInterruptCondition {
public:
IOInterruptCondition(Ref<PG>& pg);
~IOInterruptCondition();
bool new_interval_created();
bool is_stopping();
bool is_primary();
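  // Yields an exceptional future (actingset_changed or
  // system_shutdown_exception) when a new interval has been created or the
  // OSD is stopping; an empty optional means the operation may proceed.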
template <typename Fut>
std::optional<Fut> may_interrupt() {
if (new_interval_created()) {
return seastar::futurize<Fut>::make_exception_future(
::crimson::common::actingset_changed(is_primary()));
}
if (is_stopping()) {
return seastar::futurize<Fut>::make_exception_future(
::crimson::common::system_shutdown_exception());
}
return std::optional<Fut>();
}
template <typename T>
static constexpr bool is_interruption_v =
std::is_same_v<T, ::crimson::common::actingset_changed>
|| std::is_same_v<T, ::crimson::common::system_shutdown_exception>;
static bool is_interruption(std::exception_ptr& eptr) {
return (*eptr.__cxa_exception_type() ==
typeid(::crimson::common::actingset_changed) ||
*eptr.__cxa_exception_type() ==
typeid(::crimson::common::system_shutdown_exception));
}
private:
Ref<PG> pg;
epoch_t e;
};
} // namespace crimson::osd
| 1,445 | 24.368421 | 72 | h |
null | ceph-main/src/crimson/osd/pg_map.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <map>
#include <algorithm>
#include <seastar/core/future.hh>
#include <seastar/core/shared_future.hh>
#include "include/types.h"
#include "crimson/common/type_helpers.h"
#include "crimson/common/smp_helpers.h"
#include "crimson/osd/osd_operation.h"
#include "osd/osd_types.h"
namespace crimson::osd {
class PG;
/**
* PGShardMapping
*
* Maintains a mapping from spg_t to the core containing that PG. Internally, each
* core has a local copy of the mapping to enable core-local lookups. Updates
 * are proxied to core 0 and then propagated back out to all other cores -- see maybe_create_pg.
*/
class PGShardMapping : public seastar::peering_sharded_service<PGShardMapping> {
public:
/// Returns mapping if present, NULL_CORE otherwise
core_id_t get_pg_mapping(spg_t pgid) {
auto iter = pg_to_core.find(pgid);
ceph_assert_always(iter == pg_to_core.end() || iter->second != NULL_CORE);
return iter == pg_to_core.end() ? NULL_CORE : iter->second;
}
/// Returns mapping for pgid, creates new one if it doesn't already exist
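  /// When no core is specified, the PG is assigned to the core currently
  /// holding the fewest PGs; the mapping is recorded on core 0 first and
  /// then replicated to every other core before the returned future resolves.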
seastar::future<core_id_t> maybe_create_pg(
spg_t pgid,
core_id_t core = NULL_CORE) {
auto find_iter = pg_to_core.find(pgid);
if (find_iter != pg_to_core.end()) {
ceph_assert_always(find_iter->second != NULL_CORE);
if (core != NULL_CORE) {
ceph_assert_always(find_iter->second == core);
}
return seastar::make_ready_future<core_id_t>(find_iter->second);
} else {
      return container().invoke_on(0, [pgid, core]
(auto &primary_mapping) {
auto [insert_iter, inserted] = primary_mapping.pg_to_core.emplace(pgid, core);
ceph_assert_always(inserted);
ceph_assert_always(primary_mapping.core_to_num_pgs.size() > 0);
std::map<core_id_t, unsigned>::iterator core_iter;
if (core == NULL_CORE) {
core_iter = std::min_element(
primary_mapping.core_to_num_pgs.begin(),
primary_mapping.core_to_num_pgs.end(),
[](const auto &left, const auto &right) {
return left.second < right.second;
});
} else {
core_iter = primary_mapping.core_to_num_pgs.find(core);
}
ceph_assert_always(primary_mapping.core_to_num_pgs.end() != core_iter);
insert_iter->second = core_iter->first;
core_iter->second++;
return primary_mapping.container().invoke_on_others(
[pgid = insert_iter->first, core = insert_iter->second]
(auto &other_mapping) {
ceph_assert_always(core != NULL_CORE);
auto [insert_iter, inserted] = other_mapping.pg_to_core.emplace(pgid, core);
ceph_assert_always(inserted);
});
}).then([this, pgid] {
auto find_iter = pg_to_core.find(pgid);
return seastar::make_ready_future<core_id_t>(find_iter->second);
});
}
}
/// Remove pgid
seastar::future<> remove_pg(spg_t pgid) {
return container().invoke_on(0, [pgid](auto &primary_mapping) {
auto iter = primary_mapping.pg_to_core.find(pgid);
ceph_assert_always(iter != primary_mapping.pg_to_core.end());
ceph_assert_always(iter->second != NULL_CORE);
auto count_iter = primary_mapping.core_to_num_pgs.find(iter->second);
ceph_assert_always(count_iter != primary_mapping.core_to_num_pgs.end());
ceph_assert_always(count_iter->second > 0);
--(count_iter->second);
primary_mapping.pg_to_core.erase(iter);
return primary_mapping.container().invoke_on_others(
[pgid](auto &other_mapping) {
auto iter = other_mapping.pg_to_core.find(pgid);
ceph_assert_always(iter != other_mapping.pg_to_core.end());
ceph_assert_always(iter->second != NULL_CORE);
other_mapping.pg_to_core.erase(iter);
});
});
}
size_t get_num_pgs() const { return pg_to_core.size(); }
/// Map to cores in [min_core_mapping, core_mapping_limit)
PGShardMapping(core_id_t min_core_mapping, core_id_t core_mapping_limit) {
ceph_assert_always(min_core_mapping < core_mapping_limit);
for (auto i = min_core_mapping; i != core_mapping_limit; ++i) {
core_to_num_pgs.emplace(i, 0);
}
}
template <typename F>
void for_each_pgid(F &&f) const {
for (const auto &i: pg_to_core) {
std::invoke(f, i.first);
}
}
private:
std::map<core_id_t, unsigned> core_to_num_pgs;
std::map<spg_t, core_id_t> pg_to_core;
};
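// Illustrative usage sketch (hypothetical caller, not part of this
// interface): resolve the owning core for a PG and run core-local work
// there, e.g.
//
//   mapping.maybe_create_pg(pgid).then([](core_id_t core) {
//     return seastar::smp::submit_to(core, [] {
//       // core-local PG work
//     });
//   });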
/**
* PGMap
*
 * Maps spg_t to the PG instance within a shard and handles waiting on
 * pg creation.
*/
class PGMap {
struct PGCreationState : BlockerT<PGCreationState> {
static constexpr const char * type_name = "PGCreation";
void dump_detail(Formatter *f) const final;
spg_t pgid;
seastar::shared_promise<Ref<PG>> promise;
bool creating = false;
PGCreationState(spg_t pgid);
PGCreationState(const PGCreationState &) = delete;
PGCreationState(PGCreationState &&) = delete;
PGCreationState &operator=(const PGCreationState &) = delete;
PGCreationState &operator=(PGCreationState &&) = delete;
~PGCreationState();
};
std::map<spg_t, PGCreationState> pgs_creating;
using pgs_t = std::map<spg_t, Ref<PG>>;
pgs_t pgs;
public:
using PGCreationBlocker = PGCreationState;
using PGCreationBlockingEvent = PGCreationBlocker::BlockingEvent;
/**
* Get future for pg with a bool indicating whether it's already being
* created.
*/
using wait_for_pg_ertr = crimson::errorator<
crimson::ct_error::ecanceled>;
using wait_for_pg_fut = wait_for_pg_ertr::future<Ref<PG>>;
using wait_for_pg_ret = std::pair<wait_for_pg_fut, bool>;
wait_for_pg_ret wait_for_pg(PGCreationBlockingEvent::TriggerI&&, spg_t pgid);
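  // Illustrative call site (hypothetical names): callers typically check the
  // bool to decide whether PG creation still has to be kicked off, e.g.
  //
  //   auto [fut, creating] = pg_map.wait_for_pg(std::move(trigger), pgid);
  //   if (!creating) {
  //     pg_map.set_creating(pgid);  // and arrange for the actual creation
  //   }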
/**
* get PG in non-blocking manner
*/
Ref<PG> get_pg(spg_t pgid);
/**
* Set creating
*/
void set_creating(spg_t pgid);
/**
* Set newly created pg
*/
void pg_created(spg_t pgid, Ref<PG> pg);
/**
* Add newly loaded pg
*/
void pg_loaded(spg_t pgid, Ref<PG> pg);
/**
* Cancel pending creation of pgid.
*/
void pg_creation_canceled(spg_t pgid);
void remove_pg(spg_t pgid);
pgs_t& get_pgs() { return pgs; }
const pgs_t& get_pgs() const { return pgs; }
auto get_pg_count() const { return pgs.size(); }
PGMap() = default;
~PGMap();
};
}
| 6,427 | 30.821782 | 86 | h |
null | ceph-main/src/crimson/osd/pg_meta.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <tuple>
#include <seastar/core/future.hh>
#include "osd/osd_types.h"
#include "crimson/os/futurized_store.h"
/// PG-related metadata
class PGMeta
{
crimson::os::FuturizedStore::Shard& store;
const spg_t pgid;
public:
PGMeta(crimson::os::FuturizedStore::Shard& store, spg_t pgid);
seastar::future<epoch_t> get_epoch();
seastar::future<std::tuple<pg_info_t, PastIntervals>> load();
};
| 516 | 23.619048 | 70 | h |
null | ceph-main/src/crimson/osd/pg_recovery.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/future.hh>
#include "crimson/osd/backfill_state.h"
#include "crimson/osd/pg_interval_interrupt_condition.h"
#include "crimson/osd/osd_operation.h"
#include "crimson/osd/pg_recovery_listener.h"
#include "crimson/osd/scheduler/scheduler.h"
#include "crimson/osd/shard_services.h"
#include "crimson/osd/recovery_backend.h"
#include "osd/object_state.h"
namespace crimson::osd {
class UrgentRecovery;
}
class MOSDPGBackfillRemove;
class PGBackend;
class PGRecovery : public crimson::osd::BackfillState::BackfillListener {
public:
template <typename T = void>
using interruptible_future = RecoveryBackend::interruptible_future<T>;
PGRecovery(PGRecoveryListener* pg) : pg(pg) {}
virtual ~PGRecovery() {}
void start_pglogbased_recovery();
interruptible_future<bool> start_recovery_ops(
RecoveryBackend::RecoveryBlockingEvent::TriggerI&,
size_t max_to_start);
void on_backfill_reserved();
void dispatch_backfill_event(
boost::intrusive_ptr<const boost::statechart::event_base> evt);
seastar::future<> stop() { return seastar::now(); }
private:
PGRecoveryListener* pg;
size_t start_primary_recovery_ops(
RecoveryBackend::RecoveryBlockingEvent::TriggerI&,
size_t max_to_start,
std::vector<interruptible_future<>> *out);
size_t start_replica_recovery_ops(
RecoveryBackend::RecoveryBlockingEvent::TriggerI&,
size_t max_to_start,
std::vector<interruptible_future<>> *out);
std::vector<pg_shard_t> get_replica_recovery_order() const {
return pg->get_replica_recovery_order();
}
RecoveryBackend::interruptible_future<> recover_missing(
RecoveryBackend::RecoveryBlockingEvent::TriggerI&,
const hobject_t &soid, eversion_t need);
RecoveryBackend::interruptible_future<> prep_object_replica_deletes(
RecoveryBackend::RecoveryBlockingEvent::TriggerI& trigger,
const hobject_t& soid,
eversion_t need);
RecoveryBackend::interruptible_future<> prep_object_replica_pushes(
RecoveryBackend::RecoveryBlockingEvent::TriggerI& trigger,
const hobject_t& soid,
eversion_t need);
void on_local_recover(
const hobject_t& soid,
const ObjectRecoveryInfo& recovery_info,
bool is_delete,
ceph::os::Transaction& t);
  void on_global_recover(
const hobject_t& soid,
const object_stat_sum_t& stat_diff,
bool is_delete);
void on_failed_recover(
const std::set<pg_shard_t>& from,
const hobject_t& soid,
const eversion_t& v);
void on_peer_recover(
pg_shard_t peer,
const hobject_t &oid,
const ObjectRecoveryInfo &recovery_info);
void _committed_pushed_object(epoch_t epoch,
eversion_t last_complete);
friend class ReplicatedRecoveryBackend;
friend class crimson::osd::UrgentRecovery;
// backfill begin
std::unique_ptr<crimson::osd::BackfillState> backfill_state;
std::map<pg_shard_t,
MURef<MOSDPGBackfillRemove>> backfill_drop_requests;
template <class EventT>
void start_backfill_recovery(
const EventT& evt);
void request_replica_scan(
const pg_shard_t& target,
const hobject_t& begin,
const hobject_t& end) final;
void request_primary_scan(
const hobject_t& begin) final;
void enqueue_push(
const hobject_t& obj,
const eversion_t& v) final;
void enqueue_drop(
const pg_shard_t& target,
const hobject_t& obj,
const eversion_t& v) final;
void maybe_flush() final;
void update_peers_last_backfill(
const hobject_t& new_last_backfill) final;
bool budget_available() const final;
void backfilled() final;
friend crimson::osd::BackfillState::PGFacade;
friend crimson::osd::PG;
// backfill end
};
| 3,761 | 30.613445 | 73 | h |
null | ceph-main/src/crimson/osd/pg_recovery_listener.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/future.hh>
#include "common/hobject.h"
#include "include/types.h"
#include "osd/osd_types.h"
namespace crimson::osd {
class ShardServices;
};
class RecoveryBackend;
class PGRecovery;
class PGRecoveryListener {
public:
virtual crimson::osd::ShardServices& get_shard_services() = 0;
virtual PGRecovery* get_recovery_handler() = 0;
virtual epoch_t get_osdmap_epoch() const = 0;
virtual bool is_primary() const = 0;
virtual bool is_peered() const = 0;
virtual bool is_recovering() const = 0;
virtual bool is_backfilling() const = 0;
virtual PeeringState& get_peering_state() = 0;
virtual const pg_shard_t& get_pg_whoami() const = 0;
virtual const spg_t& get_pgid() const = 0;
virtual RecoveryBackend* get_recovery_backend() = 0;
virtual bool is_unreadable_object(const hobject_t&, eversion_t* v = 0) const = 0;
virtual bool has_reset_since(epoch_t) const = 0;
virtual std::vector<pg_shard_t> get_replica_recovery_order() const = 0;
virtual epoch_t get_last_peering_reset() const = 0;
virtual const pg_info_t& get_info() const= 0;
virtual seastar::future<> stop() = 0;
virtual void publish_stats_to_osd() = 0;
};
| 1,286 | 31.175 | 83 | h |
null | ceph-main/src/crimson/osd/pg_shard_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/future.hh>
#include <seastar/core/shared_future.hh>
#include <seastar/core/sharded.hh>
#include "crimson/osd/shard_services.h"
#include "crimson/osd/pg_map.h"
namespace crimson::os {
class FuturizedStore;
}
namespace crimson::osd {
/**
* PGShardManager
*
* Manages all state required to partition PGs over seastar reactors
* as well as state required to route messages to pgs. Mediates access to
* shared resources required by PGs (objectstore, messenger, monclient,
* etc)
*/
class PGShardManager {
seastar::sharded<OSDSingletonState> &osd_singleton_state;
seastar::sharded<ShardServices> &shard_services;
PGShardMapping &pg_to_shard_mapping;
#define FORWARD_CONST(FROM_METHOD, TO_METHOD, TARGET) \
template <typename... Args> \
auto FROM_METHOD(Args&&... args) const { \
return TARGET.TO_METHOD(std::forward<Args>(args)...); \
}
#define FORWARD(FROM_METHOD, TO_METHOD, TARGET) \
template <typename... Args> \
auto FROM_METHOD(Args&&... args) { \
return TARGET.TO_METHOD(std::forward<Args>(args)...); \
}
#define FORWARD_TO_OSD_SINGLETON(METHOD) \
FORWARD(METHOD, METHOD, get_osd_singleton_state())
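// For example, FORWARD_TO_OSD_SINGLETON(send_pg_created) below generates a
// send_pg_created(...) member that simply forwards its arguments to
// get_osd_singleton_state().send_pg_created(...).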
public:
using cached_map_t = OSDMapService::cached_map_t;
using local_cached_map_t = OSDMapService::local_cached_map_t;
PGShardManager(
seastar::sharded<OSDSingletonState> &osd_singleton_state,
seastar::sharded<ShardServices> &shard_services,
PGShardMapping &pg_to_shard_mapping)
: osd_singleton_state(osd_singleton_state),
shard_services(shard_services),
pg_to_shard_mapping(pg_to_shard_mapping) {}
auto &get_osd_singleton_state() {
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
return osd_singleton_state.local();
}
auto &get_osd_singleton_state() const {
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
return osd_singleton_state.local();
}
auto &get_shard_services() {
return shard_services.local();
}
auto &get_shard_services() const {
return shard_services.local();
}
auto &get_local_state() { return get_shard_services().local_state; }
auto &get_local_state() const { return get_shard_services().local_state; }
seastar::future<> update_map(local_cached_map_t &&map) {
get_osd_singleton_state().update_map(
make_local_shared_foreign(local_cached_map_t(map))
);
/* We need each core to get its own foreign_ptr<local_cached_map_t>.
* foreign_ptr can't be cheaply copied, so we make one for each core
* up front. */
return seastar::do_with(
std::vector<seastar::foreign_ptr<local_cached_map_t>>(),
[this, map](auto &fmaps) {
fmaps.resize(seastar::smp::count);
for (auto &i: fmaps) {
i = seastar::foreign_ptr(map);
}
return shard_services.invoke_on_all(
[&fmaps](auto &local) mutable {
local.local_state.update_map(
make_local_shared_foreign(
std::move(fmaps[seastar::this_shard_id()])
));
});
});
}
seastar::future<> stop_registries() {
return shard_services.invoke_on_all([](auto &local) {
return local.local_state.stop_registry();
});
}
FORWARD_TO_OSD_SINGLETON(send_pg_created)
// osd state forwards
FORWARD(is_active, is_active, get_shard_services().local_state.osd_state)
FORWARD(is_preboot, is_preboot, get_shard_services().local_state.osd_state)
FORWARD(is_booting, is_booting, get_shard_services().local_state.osd_state)
FORWARD(is_stopping, is_stopping, get_shard_services().local_state.osd_state)
FORWARD(is_prestop, is_prestop, get_shard_services().local_state.osd_state)
FORWARD(is_initializing, is_initializing, get_shard_services().local_state.osd_state)
FORWARD(set_prestop, set_prestop, get_shard_services().local_state.osd_state)
FORWARD(set_preboot, set_preboot, get_shard_services().local_state.osd_state)
FORWARD(set_booting, set_booting, get_shard_services().local_state.osd_state)
FORWARD(set_stopping, set_stopping, get_shard_services().local_state.osd_state)
FORWARD(set_active, set_active, get_shard_services().local_state.osd_state)
FORWARD(when_active, when_active, get_shard_services().local_state.osd_state)
FORWARD_CONST(get_osd_state_string, to_string, get_shard_services().local_state.osd_state)
FORWARD(got_map, got_map, get_shard_services().local_state.osdmap_gate)
FORWARD(wait_for_map, wait_for_map, get_shard_services().local_state.osdmap_gate)
// Metacoll
FORWARD_TO_OSD_SINGLETON(init_meta_coll)
FORWARD_TO_OSD_SINGLETON(get_meta_coll)
FORWARD_TO_OSD_SINGLETON(set_superblock)
// Core OSDMap methods
FORWARD_TO_OSD_SINGLETON(get_local_map)
FORWARD_TO_OSD_SINGLETON(load_map_bl)
FORWARD_TO_OSD_SINGLETON(load_map_bls)
FORWARD_TO_OSD_SINGLETON(store_maps)
seastar::future<> set_up_epoch(epoch_t e);
template <typename F>
auto with_remote_shard_state(core_id_t core, F &&f) {
return shard_services.invoke_on(
core, [f=std::move(f)](auto &target_shard_services) mutable {
return std::invoke(
std::move(f), target_shard_services.local_state,
target_shard_services);
});
}
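  // Like with_remote_shard_state, but also transfers ownership of an
  // operation: when the target core is remote, the op's connection is
  // detached via prepare_remote_submission() and re-attached on the target
  // core before f is invoked.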
template <typename T, typename F>
auto with_remote_shard_state_and_op(
core_id_t core,
typename T::IRef &&op,
F &&f) {
if (seastar::this_shard_id() == core) {
auto &target_shard_services = shard_services.local();
return std::invoke(
std::move(f),
target_shard_services.local_state,
target_shard_services,
std::move(op));
}
return op->prepare_remote_submission(
).then([op=std::move(op), f=std::move(f), this, core
](auto f_conn) mutable {
return shard_services.invoke_on(
core,
[f=std::move(f), op=std::move(op), f_conn=std::move(f_conn)
](auto &target_shard_services) mutable {
op->finish_remote_submission(std::move(f_conn));
return std::invoke(
std::move(f),
target_shard_services.local_state,
target_shard_services,
std::move(op));
});
});
}
/// Runs opref on the appropriate core, creating the pg as necessary.
template <typename T>
seastar::future<> run_with_pg_maybe_create(
typename T::IRef op
) {
ceph_assert(op->use_count() == 1);
auto &logger = crimson::get_logger(ceph_subsys_osd);
static_assert(T::can_create());
logger.debug("{}: can_create", *op);
get_local_state().registry.remove_from_registry(*op);
return pg_to_shard_mapping.maybe_create_pg(
op->get_pgid()
).then([this, op = std::move(op)](auto core) mutable {
return this->template with_remote_shard_state_and_op<T>(
core, std::move(op),
[](PerShardState &per_shard_state,
ShardServices &shard_services,
typename T::IRef op) {
per_shard_state.registry.add_to_registry(*op);
auto &logger = crimson::get_logger(ceph_subsys_osd);
auto &opref = *op;
return opref.template with_blocking_event<
PGMap::PGCreationBlockingEvent
>([&shard_services, &opref](
auto &&trigger) {
return shard_services.get_or_create_pg(
std::move(trigger),
opref.get_pgid(),
std::move(opref.get_create_info())
);
}).safe_then([&logger, &shard_services, &opref](Ref<PG> pgref) {
logger.debug("{}: have_pg", opref);
return opref.with_pg(shard_services, pgref);
}).handle_error(
crimson::ct_error::ecanceled::handle([&logger, &opref](auto) {
logger.debug("{}: pg creation canceled, dropping", opref);
return seastar::now();
})
).then([op=std::move(op)] {});
});
});
}
/// Runs opref on the appropriate core, waiting for pg as necessary
template <typename T>
seastar::future<> run_with_pg_maybe_wait(
typename T::IRef op
) {
ceph_assert(op->use_count() == 1);
auto &logger = crimson::get_logger(ceph_subsys_osd);
static_assert(!T::can_create());
logger.debug("{}: !can_create", *op);
get_local_state().registry.remove_from_registry(*op);
return pg_to_shard_mapping.maybe_create_pg(
op->get_pgid()
).then([this, op = std::move(op)](auto core) mutable {
return this->template with_remote_shard_state_and_op<T>(
core, std::move(op),
[](PerShardState &per_shard_state,
ShardServices &shard_services,
typename T::IRef op) {
per_shard_state.registry.add_to_registry(*op);
auto &logger = crimson::get_logger(ceph_subsys_osd);
auto &opref = *op;
return opref.template with_blocking_event<
PGMap::PGCreationBlockingEvent
>([&shard_services, &opref](
auto &&trigger) {
return shard_services.wait_for_pg(
std::move(trigger), opref.get_pgid());
}).safe_then([&logger, &shard_services, &opref](Ref<PG> pgref) {
logger.debug("{}: have_pg", opref);
return opref.with_pg(shard_services, pgref);
}).handle_error(
crimson::ct_error::ecanceled::handle([&logger, &opref](auto) {
logger.debug("{}: pg creation canceled, dropping", opref);
return seastar::now();
})
).then([op=std::move(op)] {});
});
});
}
seastar::future<> load_pgs(crimson::os::FuturizedStore& store);
seastar::future<> stop_pgs();
seastar::future<std::map<pg_t, pg_stat_t>> get_pg_stats() const;
/**
   * invoke_on_each_shard_seq
*
* Invokes shard_services method on each shard sequentially.
*/
template <typename F, typename... Args>
seastar::future<> invoke_on_each_shard_seq(
F &&f) const {
return sharded_map_seq(
shard_services,
[f=std::forward<F>(f)](const ShardServices &shard_services) mutable {
return std::invoke(
f,
shard_services);
});
}
/**
* for_each_pg
*
* Invokes f on each pg sequentially. Caller may rely on f not being
* invoked concurrently on multiple cores.
*/
template <typename F>
seastar::future<> for_each_pg(F &&f) const {
return invoke_on_each_shard_seq(
[f=std::move(f)](const auto &local_service) mutable {
for (auto &pg: local_service.local_state.pg_map.get_pgs()) {
std::apply(f, pg);
}
return seastar::now();
});
}
/**
* for_each_pgid
*
   * Synchronously invokes f on each pgid.
*/
template <typename F>
void for_each_pgid(F &&f) const {
return pg_to_shard_mapping.for_each_pgid(
std::forward<F>(f));
}
auto get_num_pgs() const {
return pg_to_shard_mapping.get_num_pgs();
}
seastar::future<> broadcast_map_to_pgs(epoch_t epoch);
template <typename F>
auto with_pg(spg_t pgid, F &&f) {
core_id_t core = pg_to_shard_mapping.get_pg_mapping(pgid);
return with_remote_shard_state(
core,
[pgid, f=std::move(f)](auto &local_state, auto &local_service) mutable {
return std::invoke(
std::move(f),
local_state.pg_map.get_pg(pgid));
});
}
template <typename T, typename... Args>
auto start_pg_operation(Args&&... args) {
auto op = get_local_state().registry.create_operation<T>(
std::forward<Args>(args)...);
auto &logger = crimson::get_logger(ceph_subsys_osd);
logger.debug("{}: starting {}", *op, __func__);
auto &opref = *op;
auto id = op->get_id();
if constexpr (T::is_trackable) {
op->template track_event<typename T::StartEvent>();
}
auto fut = opref.template enter_stage<>(
opref.get_connection_pipeline().await_active
).then([this, &opref, &logger] {
logger.debug("{}: start_pg_operation in await_active stage", opref);
return get_shard_services().local_state.osd_state.when_active();
}).then([&logger, &opref] {
logger.debug("{}: start_pg_operation active, entering await_map", opref);
return opref.template enter_stage<>(
opref.get_connection_pipeline().await_map);
}).then([this, &logger, &opref] {
logger.debug("{}: start_pg_operation await_map stage", opref);
using OSDMapBlockingEvent =
OSD_OSDMapGate::OSDMapBlocker::BlockingEvent;
return opref.template with_blocking_event<OSDMapBlockingEvent>(
[this, &opref](auto &&trigger) {
std::ignore = this;
return get_shard_services().local_state.osdmap_gate.wait_for_map(
std::move(trigger),
opref.get_epoch(),
&get_shard_services());
});
}).then([&logger, &opref](auto epoch) {
logger.debug("{}: got map {}, entering get_pg", opref, epoch);
return opref.template enter_stage<>(
opref.get_connection_pipeline().get_pg);
}).then([this, &logger, &opref, op=std::move(op)]() mutable {
logger.debug("{}: in get_pg core {}", opref, seastar::this_shard_id());
logger.debug("{}: in get_pg", opref);
if constexpr (T::can_create()) {
logger.debug("{}: can_create", opref);
return run_with_pg_maybe_create<T>(std::move(op));
} else {
logger.debug("{}: !can_create", opref);
return run_with_pg_maybe_wait<T>(std::move(op));
}
});
return std::make_pair(id, std::move(fut));
}
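  // Illustrative usage (hypothetical call site): message dispatch typically
  // kicks off an operation and keeps or discards the returned id/future, e.g.
  //
  //   auto [id, fut] = pg_shard_manager.start_pg_operation<ClientRequest>(
  //     /* ClientRequest ctor args */);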
#undef FORWARD
#undef FORWARD_CONST
#undef FORWARD_TO_OSD_SINGLETON
};
}
| 13,064 | 32.586118 | 92 | h |
null | ceph-main/src/crimson/osd/recovery_backend.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/future.hh>
#include "crimson/common/type_helpers.h"
#include "crimson/os/futurized_store.h"
#include "crimson/os/futurized_collection.h"
#include "crimson/osd/pg_interval_interrupt_condition.h"
#include "crimson/osd/object_context.h"
#include "crimson/osd/shard_services.h"
#include "messages/MOSDPGBackfill.h"
#include "messages/MOSDPGBackfillRemove.h"
#include "messages/MOSDPGScan.h"
#include "osd/recovery_types.h"
#include "osd/osd_types.h"
namespace crimson::osd{
class PG;
}
class PGBackend;
class RecoveryBackend {
public:
class WaitForObjectRecovery;
public:
template <typename T = void>
using interruptible_future =
::crimson::interruptible::interruptible_future<
::crimson::osd::IOInterruptCondition, T>;
using interruptor =
::crimson::interruptible::interruptor<
::crimson::osd::IOInterruptCondition>;
RecoveryBackend(crimson::osd::PG& pg,
crimson::osd::ShardServices& shard_services,
crimson::os::CollectionRef coll,
PGBackend* backend)
: pg{pg},
shard_services{shard_services},
store{&shard_services.get_store()},
coll{coll},
backend{backend} {}
virtual ~RecoveryBackend() {}
WaitForObjectRecovery& add_recovering(const hobject_t& soid) {
auto [it, added] = recovering.emplace(soid, new WaitForObjectRecovery{});
assert(added);
return *(it->second);
}
WaitForObjectRecovery& get_recovering(const hobject_t& soid) {
assert(is_recovering(soid));
return *(recovering.at(soid));
}
void remove_recovering(const hobject_t& soid) {
recovering.erase(soid);
}
bool is_recovering(const hobject_t& soid) const {
return recovering.count(soid) != 0;
}
uint64_t total_recovering() const {
return recovering.size();
}
virtual interruptible_future<> handle_recovery_op(
Ref<MOSDFastDispatchOp> m,
crimson::net::ConnectionRef conn);
virtual interruptible_future<> recover_object(
const hobject_t& soid,
eversion_t need) = 0;
virtual interruptible_future<> recover_delete(
const hobject_t& soid,
eversion_t need) = 0;
virtual interruptible_future<> push_delete(
const hobject_t& soid,
eversion_t need) = 0;
interruptible_future<BackfillInterval> scan_for_backfill(
const hobject_t& from,
std::int64_t min,
std::int64_t max);
void on_peering_interval_change(ceph::os::Transaction& t) {
clean_up(t, "new peering interval");
}
seastar::future<> stop() {
for (auto& [soid, recovery_waiter] : recovering) {
recovery_waiter->stop();
}
return on_stop();
}
protected:
crimson::osd::PG& pg;
crimson::osd::ShardServices& shard_services;
crimson::os::FuturizedStore::Shard* store;
crimson::os::CollectionRef coll;
PGBackend* backend;
struct pull_info_t {
pg_shard_t from;
hobject_t soid;
ObjectRecoveryProgress recovery_progress;
ObjectRecoveryInfo recovery_info;
crimson::osd::ObjectContextRef head_ctx;
crimson::osd::ObjectContextRef obc;
object_stat_sum_t stat;
bool is_complete() const {
return recovery_progress.is_complete(recovery_info);
}
};
struct push_info_t {
ObjectRecoveryProgress recovery_progress;
ObjectRecoveryInfo recovery_info;
crimson::osd::ObjectContextRef obc;
object_stat_sum_t stat;
};
public:
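  // Per-object recovery blocker: consumers wait on the promises below
  // (readable / recovered / pulled / per-shard pushes) and the recovery
  // paths resolve or interrupt them as progress is made.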
class WaitForObjectRecovery :
public boost::intrusive_ref_counter<
WaitForObjectRecovery, boost::thread_unsafe_counter>,
public crimson::BlockerT<WaitForObjectRecovery> {
seastar::shared_promise<> readable, recovered, pulled;
std::map<pg_shard_t, seastar::shared_promise<>> pushes;
public:
static constexpr const char* type_name = "WaitForObjectRecovery";
crimson::osd::ObjectContextRef obc;
std::optional<pull_info_t> pull_info;
std::map<pg_shard_t, push_info_t> pushing;
seastar::future<> wait_for_readable() {
return readable.get_shared_future();
}
seastar::future<> wait_for_pushes(pg_shard_t shard) {
return pushes[shard].get_shared_future();
}
seastar::future<> wait_for_recovered() {
return recovered.get_shared_future();
}
template <typename T, typename F>
auto wait_track_blocking(T &trigger, F &&fut) {
WaitForObjectRecoveryRef ref = this;
return track_blocking(
trigger,
std::forward<F>(fut)
).finally([ref] {});
}
template <typename T>
seastar::future<> wait_for_recovered(T &trigger) {
WaitForObjectRecoveryRef ref = this;
return wait_track_blocking(trigger, recovered.get_shared_future());
}
seastar::future<> wait_for_pull() {
return pulled.get_shared_future();
}
void set_readable() {
readable.set_value();
}
void set_recovered() {
recovered.set_value();
}
void set_pushed(pg_shard_t shard) {
pushes[shard].set_value();
}
void set_pulled() {
pulled.set_value();
}
void set_push_failed(pg_shard_t shard, std::exception_ptr e) {
pushes.at(shard).set_exception(e);
}
void interrupt(std::string_view why) {
readable.set_exception(std::system_error(
std::make_error_code(std::errc::interrupted), why.data()));
recovered.set_exception(std::system_error(
std::make_error_code(std::errc::interrupted), why.data()));
pulled.set_exception(std::system_error(
std::make_error_code(std::errc::interrupted), why.data()));
for (auto& [pg_shard, pr] : pushes) {
pr.set_exception(std::system_error(
std::make_error_code(std::errc::interrupted), why.data()));
}
}
void stop();
void dump_detail(Formatter* f) const {
}
};
using RecoveryBlockingEvent =
crimson::AggregateBlockingEvent<WaitForObjectRecovery::BlockingEvent>;
using WaitForObjectRecoveryRef = boost::intrusive_ptr<WaitForObjectRecovery>;
protected:
std::map<hobject_t, WaitForObjectRecoveryRef> recovering;
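  // Illustrative sketch (not verbatim from this repo) of how `recovering`
  // and WaitForObjectRecovery are typically combined; `start_recovery_of`
  // and `on_recovery_finished` are hypothetical helper names used only for
  // this example.
  //
  //   seastar::future<> start_recovery_of(const hobject_t& soid) {
  //     auto [it, added] = recovering.emplace(
  //       soid, WaitForObjectRecoveryRef{new WaitForObjectRecovery{}});
  //     // concurrent requests for the same object share the same waiter
  //     return it->second->wait_for_recovered();
  //   }
  //
  //   void on_recovery_finished(const hobject_t& soid) {
  //     recovering.at(soid)->set_recovered();
  //     remove_recovering(soid);
  //   }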
hobject_t get_temp_recovery_object(
const hobject_t& target,
eversion_t version) const;
boost::container::flat_set<hobject_t> temp_contents;
void add_temp_obj(const hobject_t &oid) {
temp_contents.insert(oid);
}
void clear_temp_obj(const hobject_t &oid) {
temp_contents.erase(oid);
}
void clean_up(ceph::os::Transaction& t, std::string_view why);
virtual seastar::future<> on_stop() = 0;
private:
void handle_backfill_finish(
MOSDPGBackfill& m,
crimson::net::ConnectionRef conn);
interruptible_future<> handle_backfill_progress(
MOSDPGBackfill& m);
interruptible_future<> handle_backfill_finish_ack(
MOSDPGBackfill& m);
interruptible_future<> handle_backfill(
MOSDPGBackfill& m,
crimson::net::ConnectionRef conn);
interruptible_future<> handle_scan_get_digest(
MOSDPGScan& m,
crimson::net::ConnectionRef conn);
interruptible_future<> handle_scan_digest(
MOSDPGScan& m);
interruptible_future<> handle_scan(
MOSDPGScan& m,
crimson::net::ConnectionRef conn);
interruptible_future<> handle_backfill_remove(MOSDPGBackfillRemove& m);
};
| 7,161 | 29.606838 | 79 | h |
null | ceph-main/src/crimson/osd/replicated_backend.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/intrusive_ptr.hpp>
#include <seastar/core/future.hh>
#include <seastar/core/weak_ptr.hh>
#include "include/buffer_fwd.h"
#include "osd/osd_types.h"
#include "acked_peers.h"
#include "pg_backend.h"
namespace crimson::osd {
class ShardServices;
}
class ReplicatedBackend : public PGBackend
{
public:
ReplicatedBackend(pg_t pgid, pg_shard_t whoami,
CollectionRef coll,
crimson::osd::ShardServices& shard_services,
DoutPrefixProvider &dpp);
void got_rep_op_reply(const MOSDRepOpReply& reply) final;
seastar::future<> stop() final;
void on_actingset_changed(bool same_primary) final;
private:
ll_read_ierrorator::future<ceph::bufferlist>
_read(const hobject_t& hoid, uint64_t off,
uint64_t len, uint32_t flags) override;
rep_op_fut_t _submit_transaction(std::set<pg_shard_t>&& pg_shards,
const hobject_t& hoid,
ceph::os::Transaction&& txn,
osd_op_params_t&& osd_op_p,
epoch_t min_epoch, epoch_t max_epoch,
std::vector<pg_log_entry_t>&& log_entries) final;
const pg_t pgid;
const pg_shard_t whoami;
class pending_on_t : public seastar::weakly_referencable<pending_on_t> {
public:
pending_on_t(size_t pending, const eversion_t& at_version)
: pending{static_cast<unsigned>(pending)}, at_version(at_version)
{}
unsigned pending;
// The order of pending_txns' at_version must be the same as their
// corresponding ceph_tid_t, as we rely on this condition for checking
// whether a client request is already completed. To put it another
    // way, a client request's at_version must be updated synchronously/simultaneously
// with ceph_tid_t.
const eversion_t at_version;
crimson::osd::acked_peers_t acked_peers;
seastar::shared_promise<> all_committed;
};
using pending_transactions_t = std::map<ceph_tid_t, pending_on_t>;
pending_transactions_t pending_trans;
seastar::future<> request_committed(
const osd_reqid_t& reqid, const eversion_t& at_version) final;
};
| 2,109 | 33.032258 | 82 | h |
null | ceph-main/src/crimson/osd/replicated_recovery_backend.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/common/interruptible_future.h"
#include "crimson/osd/pg_interval_interrupt_condition.h"
#include "crimson/osd/recovery_backend.h"
#include "messages/MOSDPGPull.h"
#include "messages/MOSDPGPush.h"
#include "messages/MOSDPGPushReply.h"
#include "messages/MOSDPGRecoveryDelete.h"
#include "messages/MOSDPGRecoveryDeleteReply.h"
#include "os/ObjectStore.h"
class ReplicatedRecoveryBackend : public RecoveryBackend {
public:
ReplicatedRecoveryBackend(crimson::osd::PG& pg,
crimson::osd::ShardServices& shard_services,
crimson::os::CollectionRef coll,
PGBackend* backend)
: RecoveryBackend(pg, shard_services, coll, backend)
{}
interruptible_future<> handle_recovery_op(
Ref<MOSDFastDispatchOp> m,
crimson::net::ConnectionRef conn) final;
interruptible_future<> recover_object(
const hobject_t& soid,
eversion_t need) final;
interruptible_future<> recover_delete(
const hobject_t& soid,
eversion_t need) final;
interruptible_future<> push_delete(
const hobject_t& soid,
eversion_t need) final;
protected:
interruptible_future<> handle_pull(
Ref<MOSDPGPull> m);
interruptible_future<> handle_pull_response(
Ref<MOSDPGPush> m);
interruptible_future<> handle_push(
Ref<MOSDPGPush> m);
interruptible_future<> handle_push_reply(
Ref<MOSDPGPushReply> m);
interruptible_future<> handle_recovery_delete(
Ref<MOSDPGRecoveryDelete> m);
interruptible_future<> handle_recovery_delete_reply(
Ref<MOSDPGRecoveryDeleteReply> m);
interruptible_future<PushOp> prep_push(
const hobject_t& soid,
eversion_t need,
pg_shard_t pg_shard);
void prepare_pull(
PullOp& pull_op,
pull_info_t& pull_info,
const hobject_t& soid,
eversion_t need);
std::vector<pg_shard_t> get_shards_to_push(
const hobject_t& soid) const;
interruptible_future<PushOp> build_push_op(
const ObjectRecoveryInfo& recovery_info,
const ObjectRecoveryProgress& progress,
object_stat_sum_t* stat);
/// @returns true if this push op is the last push op for
  /// recovery of @c push_op.soid
interruptible_future<bool> _handle_pull_response(
pg_shard_t from,
PushOp& push_op,
PullOp* response,
ceph::os::Transaction* t);
std::pair<interval_set<uint64_t>, ceph::bufferlist> trim_pushed_data(
const interval_set<uint64_t> ©_subset,
const interval_set<uint64_t> &intervals_received,
ceph::bufferlist data_received);
interruptible_future<> submit_push_data(
const ObjectRecoveryInfo &recovery_info,
bool first,
bool complete,
bool clear_omap,
interval_set<uint64_t>&& data_zeros,
interval_set<uint64_t>&& intervals_included,
ceph::bufferlist&& data_included,
ceph::bufferlist&& omap_header,
const std::map<std::string, bufferlist, std::less<>> &attrs,
std::map<std::string, bufferlist>&& omap_entries,
ceph::os::Transaction *t);
void submit_push_complete(
const ObjectRecoveryInfo &recovery_info,
ObjectStore::Transaction *t);
interruptible_future<> _handle_push(
pg_shard_t from,
PushOp& push_op,
PushReplyOp *response,
ceph::os::Transaction *t);
interruptible_future<std::optional<PushOp>> _handle_push_reply(
pg_shard_t peer,
const PushReplyOp &op);
interruptible_future<> on_local_recover_persist(
const hobject_t& soid,
const ObjectRecoveryInfo& _recovery_info,
bool is_delete,
epoch_t epoch_to_freeze);
interruptible_future<> local_recover_delete(
const hobject_t& soid,
eversion_t need,
epoch_t epoch_frozen);
seastar::future<> on_stop() final {
return seastar::now();
}
private:
/// pull missing object from peer
interruptible_future<> maybe_pull_missing_obj(
const hobject_t& soid,
eversion_t need);
/// load object context for recovery if it is not ready yet
using load_obc_ertr = crimson::errorator<
crimson::ct_error::object_corrupted>;
using load_obc_iertr =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
load_obc_ertr>;
interruptible_future<> maybe_push_shards(
const hobject_t& soid,
eversion_t need);
  /// read the metadata attached to the given object; its total size is
  /// expected to be relatively small.
///
/// @return @c oi.version
interruptible_future<eversion_t> read_metadata_for_push_op(
const hobject_t& oid,
const ObjectRecoveryProgress& progress,
ObjectRecoveryProgress& new_progress,
eversion_t ver,
PushOp* push_op);
  /// read the remaining extents of the object being recovered and fill push_op
/// with them
///
/// @param oid object being recovered
/// @param copy_subset extents we want
/// @param offset the offset in object from where we should read
/// @return the new offset
interruptible_future<uint64_t> read_object_for_push_op(
const hobject_t& oid,
const interval_set<uint64_t>& copy_subset,
uint64_t offset,
uint64_t max_len,
PushOp* push_op);
interruptible_future<> read_omap_for_push_op(
const hobject_t& oid,
const ObjectRecoveryProgress& progress,
ObjectRecoveryProgress& new_progress,
uint64_t& max_len,
PushOp* push_op);
interruptible_future<hobject_t> prep_push_target(
const ObjectRecoveryInfo &recovery_info,
bool first,
bool complete,
bool clear_omap,
ObjectStore::Transaction* t,
const std::map<std::string, bufferlist, std::less<>> &attrs,
bufferlist&& omap_header);
using interruptor = crimson::interruptible::interruptor<
crimson::osd::IOInterruptCondition>;
};
| 5,714 | 32.617647 | 77 | h |
null | ceph-main/src/crimson/osd/shard_services.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <memory>
#include <boost/intrusive_ptr.hpp>
#include <seastar/core/future.hh>
#include "include/common_fwd.h"
#include "osd_operation.h"
#include "msg/MessageRef.h"
#include "crimson/common/exception.h"
#include "crimson/common/shared_lru.h"
#include "crimson/os/futurized_collection.h"
#include "osd/PeeringState.h"
#include "crimson/osd/osdmap_service.h"
#include "crimson/osd/osdmap_gate.h"
#include "crimson/osd/osd_meta.h"
#include "crimson/osd/object_context.h"
#include "crimson/osd/pg_map.h"
#include "crimson/osd/state.h"
#include "common/AsyncReserver.h"
#include "crimson/net/Connection.h"
namespace crimson::net {
class Messenger;
}
namespace crimson::mgr {
class Client;
}
namespace crimson::mon {
class Client;
}
namespace crimson::os {
class FuturizedStore;
}
class OSDMap;
class PeeringCtx;
class BufferedRecoveryMessages;
namespace crimson::osd {
class PGShardManager;
/**
* PerShardState
*
* Per-shard state holding instances local to each shard.
*/
class PerShardState {
friend class ShardServices;
friend class PGShardManager;
friend class OSD;
using cached_map_t = OSDMapService::cached_map_t;
using local_cached_map_t = OSDMapService::local_cached_map_t;
const core_id_t core = seastar::this_shard_id();
#define assert_core() ceph_assert(seastar::this_shard_id() == core);
const int whoami;
crimson::os::FuturizedStore::Shard &store;
crimson::common::CephContext cct;
OSDState &osd_state;
OSD_OSDMapGate osdmap_gate;
PerfCounters *perf = nullptr;
PerfCounters *recoverystate_perf = nullptr;
// Op Management
OSDOperationRegistry registry;
OperationThrottler throttler;
seastar::future<> dump_ops_in_flight(Formatter *f) const;
epoch_t up_epoch = 0;
OSDMapService::cached_map_t osdmap;
const auto &get_osdmap() const {
assert_core();
return osdmap;
}
void update_map(OSDMapService::cached_map_t new_osdmap) {
assert_core();
osdmap = std::move(new_osdmap);
}
void set_up_epoch(epoch_t epoch) {
assert_core();
up_epoch = epoch;
}
  // prevent creating new osd operations when the system is shutting down;
  // this is necessary because a new operation could otherwise be created
  // after all ongoing operations have been interrupted, and then create
  // and wait on a future that may never resolve, in which case the
  // shutdown would never succeed.
bool stopping = false;
seastar::future<> stop_registry() {
assert_core();
crimson::get_logger(ceph_subsys_osd).info("PerShardState::{}", __func__);
stopping = true;
return registry.stop();
}
// PGMap state
PGMap pg_map;
seastar::future<> stop_pgs();
std::map<pg_t, pg_stat_t> get_pg_stats() const;
seastar::future<> broadcast_map_to_pgs(
ShardServices &shard_services,
epoch_t epoch);
Ref<PG> get_pg(spg_t pgid);
template <typename F>
void for_each_pg(F &&f) const {
assert_core();
for (auto &pg : pg_map.get_pgs()) {
std::invoke(f, pg.first, pg.second);
}
}
template <typename T, typename... Args>
auto start_operation(Args&&... args) {
assert_core();
if (__builtin_expect(stopping, false)) {
throw crimson::common::system_shutdown_exception();
}
auto op = registry.create_operation<T>(std::forward<Args>(args)...);
crimson::get_logger(ceph_subsys_osd).info(
"PerShardState::{}, {}", __func__, *op);
auto fut = seastar::yield().then([op] {
return op->start().finally([op /* by copy */] {
// ensure the op's lifetime is appropriate. It is not enough to
// guarantee it's alive at the scheduling stages (i.e. `then()`
// calling) but also during the actual execution (i.e. when passed
// lambdas are actually run).
});
});
return std::make_pair(std::move(op), std::move(fut));
}
template <typename InterruptorT, typename T, typename... Args>
auto start_operation_may_interrupt(Args&&... args) {
assert_core();
if (__builtin_expect(stopping, false)) {
throw crimson::common::system_shutdown_exception();
}
auto op = registry.create_operation<T>(std::forward<Args>(args)...);
crimson::get_logger(ceph_subsys_osd).info(
"PerShardState::{}, {}", __func__, *op);
auto fut = InterruptorT::make_interruptible(
seastar::yield()
).then_interruptible([op] {
return op->start().finally([op /* by copy */] {
// ensure the op's lifetime is appropriate. It is not enough to
// guarantee it's alive at the scheduling stages (i.e. `then()`
// calling) but also during the actual execution (i.e. when passed
// lambdas are actually run).
});
});
return std::make_pair(std::move(op), std::move(fut));
}
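  // Illustrative usage sketch (assumed caller, not verbatim from this repo);
  // `per_shard_state`, `SomeOperation` and `args` are hypothetical:
  //
  //   auto [op, fut] = per_shard_state.start_operation<SomeOperation>(args...);
  //   // `op` is the registered, ref-counted operation; `fut` resolves once
  //   // SomeOperation::start() has completed.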
  // tids for ops I issue, prefixed with the core id to ensure uniqueness
ceph_tid_t next_tid;
ceph_tid_t get_tid() {
assert_core();
return next_tid++;
}
HeartbeatStampsRef get_hb_stamps(int peer);
std::map<int, HeartbeatStampsRef> heartbeat_stamps;
// Time state
const ceph::mono_time startup_time;
ceph::signedspan get_mnow() const {
assert_core();
return ceph::mono_clock::now() - startup_time;
}
public:
PerShardState(
int whoami,
ceph::mono_time startup_time,
PerfCounters *perf,
PerfCounters *recoverystate_perf,
crimson::os::FuturizedStore &store,
OSDState& osd_state);
};
/**
* OSDSingletonState
*
* OSD-wide singleton holding instances that need to be accessible
* from all PGs.
*/
class OSDSingletonState : public md_config_obs_t {
friend class ShardServices;
friend class PGShardManager;
friend class OSD;
using cached_map_t = OSDMapService::cached_map_t;
using local_cached_map_t = OSDMapService::local_cached_map_t;
public:
OSDSingletonState(
int whoami,
crimson::net::Messenger &cluster_msgr,
crimson::net::Messenger &public_msgr,
crimson::mon::Client &monc,
crimson::mgr::Client &mgrc);
private:
const int whoami;
crimson::common::CephContext cct;
PerfCounters *perf = nullptr;
PerfCounters *recoverystate_perf = nullptr;
SharedLRU<epoch_t, OSDMap> osdmaps;
SimpleLRU<epoch_t, bufferlist, false> map_bl_cache;
cached_map_t osdmap;
cached_map_t &get_osdmap() { return osdmap; }
void update_map(cached_map_t new_osdmap) {
osdmap = std::move(new_osdmap);
}
crimson::net::Messenger &cluster_msgr;
crimson::net::Messenger &public_msgr;
seastar::future<> send_to_osd(int peer, MessageURef m, epoch_t from_epoch);
crimson::mon::Client &monc;
seastar::future<> osdmap_subscribe(version_t epoch, bool force_request);
crimson::mgr::Client &mgrc;
std::unique_ptr<OSDMeta> meta_coll;
template <typename... Args>
void init_meta_coll(Args&&... args) {
meta_coll = std::make_unique<OSDMeta>(std::forward<Args>(args)...);
}
OSDMeta &get_meta_coll() {
assert(meta_coll);
return *meta_coll;
}
OSDSuperblock superblock;
void set_superblock(OSDSuperblock _superblock) {
superblock = std::move(_superblock);
}
seastar::future<> send_incremental_map(
crimson::net::Connection &conn,
epoch_t first);
seastar::future<> send_incremental_map_to_osd(int osd, epoch_t first);
auto get_pool_info(int64_t poolid) {
return get_meta_coll().load_final_pool_info(poolid);
}
// global pg temp state
struct pg_temp_t {
std::vector<int> acting;
bool forced = false;
};
std::map<pg_t, pg_temp_t> pg_temp_wanted;
std::map<pg_t, pg_temp_t> pg_temp_pending;
friend std::ostream& operator<<(std::ostream&, const pg_temp_t&);
void queue_want_pg_temp(pg_t pgid, const std::vector<int>& want,
bool forced = false);
void remove_want_pg_temp(pg_t pgid);
void requeue_pg_temp();
seastar::future<> send_pg_temp();
std::set<pg_t> pg_created;
seastar::future<> send_pg_created(pg_t pgid);
seastar::future<> send_pg_created();
void prune_pg_created();
struct DirectFinisher {
void queue(Context *c) {
c->complete(0);
}
} finisher;
AsyncReserver<spg_t, DirectFinisher> local_reserver;
AsyncReserver<spg_t, DirectFinisher> remote_reserver;
AsyncReserver<spg_t, DirectFinisher> snap_reserver;
epoch_t up_thru_wanted = 0;
seastar::future<> send_alive(epoch_t want);
const char** get_tracked_conf_keys() const final;
void handle_conf_change(
const ConfigProxy& conf,
const std::set <std::string> &changed) final;
seastar::future<local_cached_map_t> get_local_map(epoch_t e);
seastar::future<std::unique_ptr<OSDMap>> load_map(epoch_t e);
seastar::future<bufferlist> load_map_bl(epoch_t e);
seastar::future<std::map<epoch_t, bufferlist>>
load_map_bls(epoch_t first, epoch_t last);
void store_map_bl(ceph::os::Transaction& t,
epoch_t e, bufferlist&& bl);
seastar::future<> store_maps(ceph::os::Transaction& t,
epoch_t start, Ref<MOSDMap> m);
};
/**
* Represents services available to each PG
*/
class ShardServices : public OSDMapService {
friend class PGShardManager;
friend class OSD;
using cached_map_t = OSDMapService::cached_map_t;
using local_cached_map_t = OSDMapService::local_cached_map_t;
PerShardState local_state;
seastar::sharded<OSDSingletonState> &osd_singleton_state;
PGShardMapping& pg_to_shard_mapping;
template <typename F, typename... Args>
auto with_singleton(F &&f, Args&&... args) {
return osd_singleton_state.invoke_on(
PRIMARY_CORE,
std::forward<F>(f),
std::forward<Args>(args)...
);
}
#define FORWARD_CONST(FROM_METHOD, TO_METHOD, TARGET) \
template <typename... Args> \
auto FROM_METHOD(Args&&... args) const { \
return TARGET.TO_METHOD(std::forward<Args>(args)...); \
}
#define FORWARD(FROM_METHOD, TO_METHOD, TARGET) \
template <typename... Args> \
auto FROM_METHOD(Args&&... args) { \
return TARGET.TO_METHOD(std::forward<Args>(args)...); \
}
#define FORWARD_TO_LOCAL(METHOD) FORWARD(METHOD, METHOD, local_state)
#define FORWARD_TO_LOCAL_CONST(METHOD) FORWARD_CONST( \
  METHOD, METHOD, local_state)
#define FORWARD_TO_OSD_SINGLETON_TARGET(METHOD, TARGET) \
template <typename... Args> \
auto METHOD(Args&&... args) { \
return with_singleton( \
[](auto &local_state, auto&&... args) { \
return local_state.TARGET( \
std::forward<decltype(args)>(args)...); \
}, std::forward<Args>(args)...); \
}
#define FORWARD_TO_OSD_SINGLETON(METHOD) \
FORWARD_TO_OSD_SINGLETON_TARGET(METHOD, METHOD)
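  // Illustrative expansion, written out by hand for clarity (not generated
  // output): FORWARD_TO_OSD_SINGLETON(send_pg_temp) yields, roughly,
  //
  //   template <typename... Args>
  //   auto send_pg_temp(Args&&... args) {
  //     return with_singleton(
  //       [](auto &singleton, auto&&... args) {
  //         return singleton.send_pg_temp(
  //           std::forward<decltype(args)>(args)...);
  //       }, std::forward<Args>(args)...);
  //   }
  //
  // i.e. the call is proxied to the OSDSingletonState instance living on
  // PRIMARY_CORE via seastar::sharded<>::invoke_on.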
public:
template <typename... PSSArgs>
ShardServices(
seastar::sharded<OSDSingletonState> &osd_singleton_state,
PGShardMapping& pg_to_shard_mapping,
PSSArgs&&... args)
: local_state(std::forward<PSSArgs>(args)...),
osd_singleton_state(osd_singleton_state),
pg_to_shard_mapping(pg_to_shard_mapping) {}
FORWARD_TO_OSD_SINGLETON(send_to_osd)
crimson::os::FuturizedStore::Shard &get_store() {
return local_state.store;
}
auto remove_pg(spg_t pgid) {
local_state.pg_map.remove_pg(pgid);
return pg_to_shard_mapping.remove_pg(pgid);
}
crimson::common::CephContext *get_cct() {
return &(local_state.cct);
}
template <typename T, typename... Args>
auto start_operation(Args&&... args) {
return local_state.start_operation<T>(std::forward<Args>(args)...);
}
template <typename InterruptorT, typename T, typename... Args>
auto start_operation_may_interrupt(Args&&... args) {
return local_state.start_operation_may_interrupt<
InterruptorT, T>(std::forward<Args>(args)...);
}
auto &get_registry() { return local_state.registry; }
// Loggers
PerfCounters &get_recoverystate_perf_logger() {
return *local_state.recoverystate_perf;
}
PerfCounters &get_perf_logger() {
return *local_state.perf;
}
// Diagnostics
FORWARD_TO_LOCAL_CONST(dump_ops_in_flight);
// Local PG Management
seastar::future<Ref<PG>> make_pg(
cached_map_t create_map,
spg_t pgid,
bool do_create);
seastar::future<Ref<PG>> handle_pg_create_info(
std::unique_ptr<PGCreateInfo> info);
using get_or_create_pg_ertr = PGMap::wait_for_pg_ertr;
using get_or_create_pg_ret = get_or_create_pg_ertr::future<Ref<PG>>;
get_or_create_pg_ret get_or_create_pg(
PGMap::PGCreationBlockingEvent::TriggerI&&,
spg_t pgid,
std::unique_ptr<PGCreateInfo> info);
using wait_for_pg_ertr = PGMap::wait_for_pg_ertr;
using wait_for_pg_ret = wait_for_pg_ertr::future<Ref<PG>>;
wait_for_pg_ret wait_for_pg(
PGMap::PGCreationBlockingEvent::TriggerI&&, spg_t pgid);
seastar::future<Ref<PG>> load_pg(spg_t pgid);
/// Dispatch and reset ctx transaction
seastar::future<> dispatch_context_transaction(
crimson::os::CollectionRef col, PeeringCtx &ctx);
/// Dispatch and reset ctx messages
seastar::future<> dispatch_context_messages(
BufferedRecoveryMessages &&ctx);
/// Dispatch ctx and dispose of context
seastar::future<> dispatch_context(
crimson::os::CollectionRef col,
PeeringCtx &&ctx);
/// Dispatch ctx and dispose of ctx, transaction must be empty
seastar::future<> dispatch_context(
PeeringCtx &&ctx) {
return dispatch_context({}, std::move(ctx));
}
/// Return per-core tid
ceph_tid_t get_tid() { return local_state.get_tid(); }
  /// Return the number of PGs mapped to this core
unsigned get_num_local_pgs() const {
return local_state.pg_map.get_pg_count();
}
// OSDMapService
cached_map_t get_map() const final { return local_state.get_osdmap(); }
epoch_t get_up_epoch() const final { return local_state.up_epoch; }
seastar::future<cached_map_t> get_map(epoch_t e) final {
return with_singleton(
[](auto &sstate, epoch_t e) {
return sstate.get_local_map(
e
).then([](auto lmap) {
return seastar::foreign_ptr<local_cached_map_t>(lmap);
});
}, e).then([](auto fmap) {
return make_local_shared_foreign(std::move(fmap));
});
}
FORWARD_TO_OSD_SINGLETON(get_pool_info)
FORWARD(with_throttle_while, with_throttle_while, local_state.throttler)
FORWARD_TO_OSD_SINGLETON(send_incremental_map)
FORWARD_TO_OSD_SINGLETON(send_incremental_map_to_osd)
FORWARD_TO_OSD_SINGLETON(osdmap_subscribe)
FORWARD_TO_OSD_SINGLETON(queue_want_pg_temp)
FORWARD_TO_OSD_SINGLETON(remove_want_pg_temp)
FORWARD_TO_OSD_SINGLETON(requeue_pg_temp)
FORWARD_TO_OSD_SINGLETON(send_pg_created)
FORWARD_TO_OSD_SINGLETON(send_alive)
FORWARD_TO_OSD_SINGLETON(send_pg_temp)
FORWARD_TO_LOCAL_CONST(get_mnow)
FORWARD_TO_LOCAL(get_hb_stamps)
FORWARD(pg_created, pg_created, local_state.pg_map)
FORWARD_TO_OSD_SINGLETON_TARGET(
local_update_priority,
local_reserver.update_priority)
FORWARD_TO_OSD_SINGLETON_TARGET(
local_cancel_reservation,
local_reserver.cancel_reservation)
FORWARD_TO_OSD_SINGLETON_TARGET(
local_dump_reservations,
local_reserver.dump)
FORWARD_TO_OSD_SINGLETON_TARGET(
remote_cancel_reservation,
remote_reserver.cancel_reservation)
FORWARD_TO_OSD_SINGLETON_TARGET(
remote_dump_reservations,
remote_reserver.dump)
FORWARD_TO_OSD_SINGLETON_TARGET(
snap_cancel_reservation,
snap_reserver.cancel_reservation)
FORWARD_TO_OSD_SINGLETON_TARGET(
snap_dump_reservations,
snap_reserver.dump)
Context *invoke_context_on_core(core_id_t core, Context *c) {
if (!c) return nullptr;
return new LambdaContext([core, c](int code) {
std::ignore = seastar::smp::submit_to(
core,
[c, code] {
c->complete(code);
});
});
}
seastar::future<> local_request_reservation(
spg_t item,
Context *on_reserved,
unsigned prio,
Context *on_preempt) {
return with_singleton(
[item, prio](OSDSingletonState &singleton,
Context *wrapped_on_reserved, Context *wrapped_on_preempt) {
return singleton.local_reserver.request_reservation(
item,
wrapped_on_reserved,
prio,
wrapped_on_preempt);
},
invoke_context_on_core(seastar::this_shard_id(), on_reserved),
invoke_context_on_core(seastar::this_shard_id(), on_preempt));
}
seastar::future<> remote_request_reservation(
spg_t item,
Context *on_reserved,
unsigned prio,
Context *on_preempt) {
return with_singleton(
[item, prio](OSDSingletonState &singleton,
Context *wrapped_on_reserved, Context *wrapped_on_preempt) {
return singleton.remote_reserver.request_reservation(
item,
wrapped_on_reserved,
prio,
wrapped_on_preempt);
},
invoke_context_on_core(seastar::this_shard_id(), on_reserved),
invoke_context_on_core(seastar::this_shard_id(), on_preempt));
}
seastar::future<> snap_request_reservation(
spg_t item,
Context *on_reserved,
unsigned prio) {
return with_singleton(
[item, prio](OSDSingletonState &singleton,
Context *wrapped_on_reserved) {
return singleton.snap_reserver.request_reservation(
item,
wrapped_on_reserved,
prio);
},
invoke_context_on_core(seastar::this_shard_id(), on_reserved));
}
#undef FORWARD_CONST
#undef FORWARD
#undef FORWARD_TO_OSD_SINGLETON
#undef FORWARD_TO_LOCAL
#undef FORWARD_TO_LOCAL_CONST
};
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::osd::OSDSingletonState::pg_temp_t> : fmt::ostream_formatter {};
#endif
| 17,439 | 28.509306 | 106 | h |
null | ceph-main/src/crimson/osd/state.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string_view>
#include <ostream>
#include <seastar/core/shared_future.hh>
class OSDMap;
namespace crimson::osd {
// seastar::sharded puts start_single on core 0
constexpr core_id_t PRIMARY_CORE = 0;
/**
* OSDState
*
* Maintains state representing the OSD's progress from booting through
* shutdown.
*
* Shards other than PRIMARY_CORE may use their local instance to check
* on ACTIVE and STOPPING. All other methods are restricted to
* PRIMARY_CORE (such methods start with an assert to this effect).
*/
class OSDState : public seastar::peering_sharded_service<OSDState> {
enum class State {
INITIALIZING,
PREBOOT,
BOOTING,
ACTIVE,
PRESTOP,
STOPPING,
WAITING_FOR_HEALTHY,
};
State state = State::INITIALIZING;
mutable seastar::shared_promise<> wait_for_active;
/// Sets local instance state to active, called from set_active
void _set_active() {
state = State::ACTIVE;
wait_for_active.set_value();
wait_for_active = {};
}
/// Sets local instance state to stopping, called from set_stopping
void _set_stopping() {
state = State::STOPPING;
wait_for_active.set_exception(crimson::common::system_shutdown_exception{});
wait_for_active = {};
}
public:
bool is_initializing() const {
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
return state == State::INITIALIZING;
}
bool is_preboot() const {
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
return state == State::PREBOOT;
}
bool is_booting() const {
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
return state == State::BOOTING;
}
bool is_active() const {
return state == State::ACTIVE;
}
seastar::future<> when_active() const {
return is_active() ? seastar::now()
: wait_for_active.get_shared_future();
};
bool is_prestop() const {
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
return state == State::PRESTOP;
}
bool is_stopping() const {
return state == State::STOPPING;
}
bool is_waiting_for_healthy() const {
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
return state == State::WAITING_FOR_HEALTHY;
}
void set_preboot() {
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
state = State::PREBOOT;
}
void set_booting() {
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
state = State::BOOTING;
}
/// Sets all shards to active
seastar::future<> set_active() {
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
return container().invoke_on_all([](auto& osd_state) {
osd_state._set_active();
});
}
void set_prestop() {
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
state = State::PRESTOP;
}
/// Sets all shards to stopping
seastar::future<> set_stopping() {
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
return container().invoke_on_all([](auto& osd_state) {
osd_state._set_stopping();
});
}
std::string_view to_string() const {
switch (state) {
case State::INITIALIZING: return "initializing";
case State::PREBOOT: return "preboot";
case State::BOOTING: return "booting";
case State::ACTIVE: return "active";
case State::PRESTOP: return "prestop";
case State::STOPPING: return "stopping";
case State::WAITING_FOR_HEALTHY: return "waiting_for_healthy";
default: return "???";
}
}
};
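// Illustrative sketch (assumed call sites, not verbatim from this repo) of
// the core-affinity rules documented above; `osd_state` names a local
// OSDState instance:
//
//   // on any shard:
//   if (osd_state.is_active() && !osd_state.is_stopping()) {
//     /* safe to serve IO */
//   }
//
//   // on PRIMARY_CORE only (asserted inside):
//   osd_state.set_preboot();
//   return osd_state.set_active();  // invoke_on_all() broadcast to all shards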
inline std::ostream&
operator<<(std::ostream& os, const OSDState& s) {
return os << s.to_string();
}
}
| 3,658 | 26.931298 | 80 | h |
null | ceph-main/src/crimson/osd/stop_signal.h | /*
* This file is open source software, licensed to you under the terms
* of the Apache License, Version 2.0 (the "License"). See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (C) 2020 Cloudius Systems, Ltd.
*/
#pragma once
#include <seastar/core/abort_source.hh>
#include <seastar/core/reactor.hh>
#include <seastar/core/condition-variable.hh>
/// Seastar apps lib namespace
namespace seastar_apps_lib {
/// \brief Futurized SIGINT/SIGTERM signals handler class
///
/// Seastar-style helper class that allows easy waiting for SIGINT/SIGTERM signals
/// from your app.
///
/// Example:
/// \code
/// #include <seastar/apps/lib/stop_signal.hh>
/// ...
/// int main() {
/// ...
/// seastar::thread th([] {
/// seastar_apps_lib::stop_signal stop_signal;
/// <some code>
/// stop_signal.wait().get(); // this will wait till we receive SIGINT or SIGTERM signal
/// });
/// \endcode
class stop_signal {
seastar::condition_variable _cond;
seastar::abort_source _abort_source;
private:
void on_signal() {
if (stopping()) {
return;
}
_abort_source.request_abort();
_cond.broadcast();
}
public:
stop_signal() {
seastar::engine().handle_signal(SIGINT, [this] { on_signal(); });
seastar::engine().handle_signal(SIGTERM, [this] { on_signal(); });
}
~stop_signal() {
// There's no way to unregister a handler yet, so register a no-op handler instead.
seastar::engine().handle_signal(SIGINT, [] {});
seastar::engine().handle_signal(SIGTERM, [] {});
}
seastar::future<> wait() {
return _cond.wait([this] { return _abort_source.abort_requested(); });
}
bool stopping() const {
return _abort_source.abort_requested();
}
auto& abort_source() {
return _abort_source;
}
};
}
| 2,412 | 27.72619 | 92 | h |
null | ceph-main/src/crimson/osd/watch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iterator>
#include <map>
#include <set>
#include <seastar/core/shared_ptr.hh>
#include "crimson/net/Connection.h"
#include "crimson/osd/object_context.h"
#include "crimson/osd/pg.h"
#include "include/denc.h"
namespace crimson::osd {
class Notify;
using NotifyRef = seastar::shared_ptr<Notify>;
// NOTE: the enable_shared_from_this base really needs to be public;
// otherwise `shared_from_this()` will abort. According to cppreference.com:
//
// "The constructors of std::shared_ptr detect the presence
// of an unambiguous and accessible (ie. public inheritance
// is mandatory) (since C++17) enable_shared_from_this base".
//
// I expect the `seastar::shared_ptr` shares this behaviour.
class Watch : public seastar::enable_shared_from_this<Watch> {
// this is a private tag for the public constructor that turns it into
  // a de facto private one. The motivation behind the hack is that
  // make_shared is used by create().
struct private_ctag_t{};
std::set<NotifyRef, std::less<>> in_progress_notifies;
crimson::net::ConnectionRef conn;
crimson::osd::ObjectContextRef obc;
watch_info_t winfo;
entity_name_t entity_name;
Ref<PG> pg;
seastar::timer<seastar::lowres_clock> timeout_timer;
seastar::future<> start_notify(NotifyRef);
seastar::future<> send_notify_msg(NotifyRef);
seastar::future<> send_disconnect_msg();
friend Notify;
friend class WatchTimeoutRequest;
public:
Watch(private_ctag_t,
crimson::osd::ObjectContextRef obc,
const watch_info_t& winfo,
const entity_name_t& entity_name,
Ref<PG> pg)
: obc(std::move(obc)),
winfo(winfo),
entity_name(entity_name),
pg(std::move(pg)),
timeout_timer([this] {
return do_watch_timeout();
}) {
assert(this->pg);
}
~Watch();
seastar::future<> connect(crimson::net::ConnectionRef, bool);
void disconnect();
bool is_alive() const {
return true;
}
bool is_connected() const {
return static_cast<bool>(conn);
}
void got_ping(utime_t);
void discard_state();
seastar::future<> remove();
/// Call when notify_ack received on notify_id
seastar::future<> notify_ack(
uint64_t notify_id, ///< [in] id of acked notify
const ceph::bufferlist& reply_bl); ///< [in] notify reply buffer
template <class... Args>
static seastar::shared_ptr<Watch> create(Args&&... args) {
return seastar::make_shared<Watch>(private_ctag_t{},
std::forward<Args>(args)...);
};
uint64_t get_watcher_gid() const {
return entity_name.num();
}
auto get_pg() const {
return pg;
}
auto& get_entity() const {
return entity_name;
}
auto& get_cookie() const {
return winfo.cookie;
}
auto& get_peer_addr() const {
return winfo.addr;
}
void cancel_notify(const uint64_t notify_id);
void do_watch_timeout();
};
using WatchRef = seastar::shared_ptr<Watch>;
struct notify_reply_t {
uint64_t watcher_gid;
uint64_t watcher_cookie;
ceph::bufferlist bl;
bool operator<(const notify_reply_t& rhs) const;
DENC(notify_reply_t, v, p) {
// there is no versioning / preamble
denc(v.watcher_gid, p);
denc(v.watcher_cookie, p);
denc(v.bl, p);
}
};
std::ostream &operator<<(std::ostream &out, const notify_reply_t &rhs);
class Notify : public seastar::enable_shared_from_this<Notify> {
std::set<WatchRef> watchers;
const notify_info_t ninfo;
crimson::net::ConnectionRef conn;
const uint64_t client_gid;
const uint64_t user_version;
bool complete{false};
bool discarded{false};
seastar::timer<seastar::lowres_clock> timeout_timer{
[this] { do_notify_timeout(); }
};
~Notify();
/// (gid,cookie) -> reply_bl for everyone who acked the notify
std::multiset<notify_reply_t> notify_replies;
uint64_t get_id() const { return ninfo.notify_id; }
/// Sends notify completion if watchers.empty() or timeout
seastar::future<> send_completion(
std::set<WatchRef> timedout_watchers = {});
/// Called on Notify timeout
void do_notify_timeout();
Notify(crimson::net::ConnectionRef conn,
const notify_info_t& ninfo,
const uint64_t client_gid,
const uint64_t user_version);
template <class WatchIteratorT>
Notify(WatchIteratorT begin,
WatchIteratorT end,
crimson::net::ConnectionRef conn,
const notify_info_t& ninfo,
const uint64_t client_gid,
const uint64_t user_version);
// this is a private tag for the public constructor that turns it into
  // a de facto private one. The motivation behind the hack is that
  // make_shared is used by the create_n_propagate factory.
struct private_ctag_t{};
using ptr_t = seastar::shared_ptr<Notify>;
friend bool operator<(const ptr_t& lhs, const ptr_t& rhs) {
assert(lhs);
assert(rhs);
return lhs->get_id() < rhs->get_id();
}
friend bool operator<(const ptr_t& ptr, const uint64_t id) {
assert(ptr);
return ptr->get_id() < id;
}
friend bool operator<(const uint64_t id, const ptr_t& ptr) {
assert(ptr);
return id < ptr->get_id();
}
friend Watch;
public:
template <class... Args>
Notify(private_ctag_t, Args&&... args) : Notify(std::forward<Args>(args)...) {
}
template <class WatchIteratorT, class... Args>
static seastar::future<> create_n_propagate(
WatchIteratorT begin,
WatchIteratorT end,
Args&&... args);
seastar::future<> remove_watcher(WatchRef watch);
seastar::future<> complete_watcher(WatchRef watch,
const ceph::bufferlist& reply_bl);
};
template <class WatchIteratorT>
Notify::Notify(WatchIteratorT begin,
WatchIteratorT end,
crimson::net::ConnectionRef conn,
const notify_info_t& ninfo,
const uint64_t client_gid,
const uint64_t user_version)
: watchers(begin, end),
ninfo(ninfo),
conn(std::move(conn)),
client_gid(client_gid),
user_version(user_version) {
assert(!std::empty(watchers));
if (ninfo.timeout) {
timeout_timer.arm(std::chrono::seconds{ninfo.timeout});
}
}
template <class WatchIteratorT, class... Args>
seastar::future<> Notify::create_n_propagate(
WatchIteratorT begin,
WatchIteratorT end,
Args&&... args)
{
static_assert(
std::is_same_v<typename std::iterator_traits<WatchIteratorT>::value_type,
crimson::osd::WatchRef>);
if (begin == end) {
auto notify = seastar::make_shared<Notify>(
private_ctag_t{},
std::forward<Args>(args)...);
return notify->send_completion();
} else {
auto notify = seastar::make_shared<Notify>(
private_ctag_t{},
begin, end,
std::forward<Args>(args)...);
return seastar::do_for_each(begin, end, [=] (auto& watchref) {
return watchref->start_notify(notify);
});
}
}
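// Illustrative sketch of a hypothetical caller (not verbatim from this repo);
// `watchers` is assumed to be a std::set<WatchRef>, and `conn`, `ninfo`,
// `client_gid` and `user_version` are assumed to be in scope:
//
//   return Notify::create_n_propagate(
//     std::begin(watchers), std::end(watchers),
//     conn, ninfo, client_gid, user_version);
//
// With an empty range the completion is sent immediately; otherwise every
// watcher has start_notify() invoked with the shared Notify instance.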
} // namespace crimson::osd
WRITE_CLASS_DENC(crimson::osd::notify_reply_t)
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::osd::notify_reply_t> : fmt::ostream_formatter {};
#endif
| 7,146 | 26.809339 | 92 | h |
null | ceph-main/src/crimson/osd/osd_operations/background_recovery.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/statechart/event_base.hpp>
#include "crimson/net/Connection.h"
#include "crimson/osd/osd_operation.h"
#include "crimson/osd/recovery_backend.h"
#include "crimson/common/type_helpers.h"
#include "crimson/osd/osd_operations/peering_event.h"
#include "crimson/osd/pg.h"
namespace crimson::osd {
class PG;
class ShardServices;
template <class T>
class BackgroundRecoveryT : public PhasedOperationT<T> {
public:
static constexpr OperationTypeCode type = OperationTypeCode::background_recovery;
BackgroundRecoveryT(
Ref<PG> pg,
ShardServices &ss,
epoch_t epoch_started,
crimson::osd::scheduler::scheduler_class_t scheduler_class, float delay = 0);
virtual void print(std::ostream &) const;
seastar::future<> start();
protected:
Ref<PG> pg;
const epoch_t epoch_started;
float delay = 0;
private:
virtual void dump_detail(Formatter *f) const;
crimson::osd::scheduler::params_t get_scheduler_params() const {
return {
1, // cost
0, // owner
scheduler_class
};
}
using do_recovery_ret_t = typename PhasedOperationT<T>::template interruptible_future<bool>;
virtual do_recovery_ret_t do_recovery() = 0;
ShardServices &ss;
const crimson::osd::scheduler::scheduler_class_t scheduler_class;
};
/// represents a recovery initiated for serving a client request
///
/// unlike @c PglogBasedRecovery and @c BackfillRecovery,
/// @c UrgentRecovery is not throttled by the scheduler, and it
/// utilizes @c RecoveryBackend directly to recover the unreadable
/// object.
class UrgentRecovery final : public BackgroundRecoveryT<UrgentRecovery> {
public:
UrgentRecovery(
const hobject_t& soid,
const eversion_t& need,
Ref<PG> pg,
ShardServices& ss,
epoch_t epoch_started);
void print(std::ostream&) const final;
std::tuple<
OperationThrottler::BlockingEvent,
RecoveryBackend::RecoveryBlockingEvent
> tracking_events;
private:
void dump_detail(Formatter* f) const final;
interruptible_future<bool> do_recovery() override;
const hobject_t soid;
const eversion_t need;
};
class PglogBasedRecovery final : public BackgroundRecoveryT<PglogBasedRecovery> {
public:
PglogBasedRecovery(
Ref<PG> pg,
ShardServices &ss,
epoch_t epoch_started,
float delay = 0);
std::tuple<
OperationThrottler::BlockingEvent,
RecoveryBackend::RecoveryBlockingEvent
> tracking_events;
private:
interruptible_future<bool> do_recovery() override;
};
class BackfillRecovery final : public BackgroundRecoveryT<BackfillRecovery> {
public:
template <class EventT>
BackfillRecovery(
Ref<PG> pg,
ShardServices &ss,
epoch_t epoch_started,
const EventT& evt);
PipelineHandle& get_handle() { return handle; }
std::tuple<
OperationThrottler::BlockingEvent,
PGPeeringPipeline::Process::BlockingEvent
> tracking_events;
private:
boost::intrusive_ptr<const boost::statechart::event_base> evt;
PipelineHandle handle;
static PGPeeringPipeline &peering_pp(PG &pg);
interruptible_future<bool> do_recovery() override;
};
template <class EventT>
BackfillRecovery::BackfillRecovery(
Ref<PG> pg,
ShardServices &ss,
const epoch_t epoch_started,
const EventT& evt)
: BackgroundRecoveryT(
std::move(pg),
ss,
epoch_started,
crimson::osd::scheduler::scheduler_class_t::background_best_effort),
evt(evt.intrusive_from_this())
{}
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::osd::BackfillRecovery> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::osd::PglogBasedRecovery> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::osd::UrgentRecovery> : fmt::ostream_formatter {};
template <class T> struct fmt::formatter<crimson::osd::BackgroundRecoveryT<T>> : fmt::ostream_formatter {};
#endif
| 3,950 | 26.248276 | 107 | h |
null | ceph-main/src/crimson/osd/osd_operations/client_request.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/future.hh>
#include <boost/intrusive/list.hpp>
#include <boost/intrusive_ptr.hpp>
#include "osd/osd_op_util.h"
#include "crimson/net/Connection.h"
#include "crimson/osd/object_context.h"
#include "crimson/osd/osdmap_gate.h"
#include "crimson/osd/osd_operation.h"
#include "crimson/osd/osd_operations/client_request_common.h"
#include "crimson/osd/osd_operations/common/pg_pipeline.h"
#include "crimson/osd/pg_activation_blocker.h"
#include "crimson/osd/pg_map.h"
#include "crimson/common/type_helpers.h"
#include "crimson/common/utility.h"
#include "messages/MOSDOp.h"
namespace crimson::osd {
class PG;
class OSD;
class ShardServices;
class ClientRequest final : public PhasedOperationT<ClientRequest>,
private CommonClientRequest {
// Initially set to primary core, updated to pg core after move,
// used by put_historic
ShardServices *put_historic_shard_services = nullptr;
crimson::net::ConnectionRef conn;
// must be after conn due to ConnectionPipeline's life-time
Ref<MOSDOp> m;
OpInfo op_info;
seastar::promise<> on_complete;
unsigned instance_id = 0;
public:
class PGPipeline : public CommonPGPipeline {
public:
struct AwaitMap : OrderedExclusivePhaseT<AwaitMap> {
static constexpr auto type_name = "ClientRequest::PGPipeline::await_map";
} await_map;
struct WaitRepop : OrderedConcurrentPhaseT<WaitRepop> {
static constexpr auto type_name = "ClientRequest::PGPipeline::wait_repop";
} wait_repop;
struct SendReply : OrderedExclusivePhaseT<SendReply> {
static constexpr auto type_name = "ClientRequest::PGPipeline::send_reply";
} send_reply;
friend class ClientRequest;
friend class LttngBackend;
friend class HistoricBackend;
friend class ReqRequest;
friend class LogMissingRequest;
friend class LogMissingRequestReply;
};
/**
* instance_handle_t
*
* Client request is, at present, the only Operation which can be requeued.
* This is, mostly, fine. However, reusing the PipelineHandle or
* BlockingEvent structures before proving that the prior instance has stopped
* can create hangs or crashes due to violations of the BlockerT and
* PipelineHandle invariants.
*
* To solve this, we create an instance_handle_t which contains the events
* for the portion of execution that can be rerun as well as the
* PipelineHandle. ClientRequest::with_pg_int grabs a reference to the current
* instance_handle_t and releases its PipelineHandle in the finally block.
* On requeue, we create a new instance_handle_t with a fresh PipelineHandle
   * and events tuple and use it for the next invocation of
* with_pg_int.
*/
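  // Illustrative sketch (simplified, not verbatim) of the requeue flow
  // described above; `run_stages_using` stands in for the real pipeline
  // traversal done by with_pg_int():
  //
  //   // inside with_pg_int():
  //   auto instance = get_instance_handle();  // pin the current instance
  //   return run_stages_using(instance->handle)
  //     .finally([instance] { /* PipelineHandle released here */ });
  //
  //   // on requeue, before the next with_pg_int() invocation:
  //   reset_instance_handle();  // fresh PipelineHandle and fresh event tuple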
std::tuple<
StartEvent,
ConnectionPipeline::AwaitActive::BlockingEvent,
ConnectionPipeline::AwaitMap::BlockingEvent,
OSD_OSDMapGate::OSDMapBlocker::BlockingEvent,
ConnectionPipeline::GetPG::BlockingEvent,
PGMap::PGCreationBlockingEvent,
CompletionEvent
> tracking_events;
class instance_handle_t : public boost::intrusive_ref_counter<
instance_handle_t, boost::thread_unsafe_counter> {
public:
// intrusive_ptr because seastar::lw_shared_ptr includes a cpu debug check
// that we will fail since the core on which we allocate the request may not
// be the core on which we perform with_pg_int. This is harmless, since we
// don't leave any references on the source core, so we just bypass it by using
// intrusive_ptr instead.
using ref_t = boost::intrusive_ptr<instance_handle_t>;
PipelineHandle handle;
std::tuple<
PGPipeline::AwaitMap::BlockingEvent,
PG_OSDMapGate::OSDMapBlocker::BlockingEvent,
PGPipeline::WaitForActive::BlockingEvent,
PGActivationBlocker::BlockingEvent,
PGPipeline::RecoverMissing::BlockingEvent,
PGPipeline::GetOBC::BlockingEvent,
PGPipeline::Process::BlockingEvent,
PGPipeline::WaitRepop::BlockingEvent,
PGPipeline::SendReply::BlockingEvent,
CompletionEvent
> pg_tracking_events;
template <typename BlockingEventT, typename InterruptorT=void, typename F>
auto with_blocking_event(F &&f, ClientRequest &op) {
auto ret = std::forward<F>(f)(
typename BlockingEventT::template Trigger<ClientRequest>{
std::get<BlockingEventT>(pg_tracking_events), op
});
if constexpr (std::is_same_v<InterruptorT, void>) {
return ret;
} else {
using ret_t = decltype(ret);
return typename InterruptorT::template futurize_t<ret_t>{std::move(ret)};
}
}
template <typename InterruptorT=void, typename StageT>
auto enter_stage(StageT &stage, ClientRequest &op) {
return this->template with_blocking_event<
typename StageT::BlockingEvent,
InterruptorT>(
[&stage, this](auto &&trigger) {
return handle.template enter<ClientRequest>(
stage, std::move(trigger));
}, op);
}
template <
typename InterruptorT=void, typename BlockingObj, typename Method,
typename... Args>
auto enter_blocker(
ClientRequest &op, BlockingObj &obj, Method method, Args&&... args) {
return this->template with_blocking_event<
typename BlockingObj::Blocker::BlockingEvent,
InterruptorT>(
[&obj, method,
args=std::forward_as_tuple(std::move(args)...)](auto &&trigger) mutable {
return apply_method_to_tuple(
obj, method,
std::tuple_cat(
std::forward_as_tuple(std::move(trigger)),
std::move(args))
);
}, op);
}
};
instance_handle_t::ref_t instance_handle;
void reset_instance_handle() {
instance_handle = new instance_handle_t;
}
auto get_instance_handle() { return instance_handle; }
using ordering_hook_t = boost::intrusive::list_member_hook<>;
ordering_hook_t ordering_hook;
class Orderer {
using list_t = boost::intrusive::list<
ClientRequest,
boost::intrusive::member_hook<
ClientRequest,
typename ClientRequest::ordering_hook_t,
&ClientRequest::ordering_hook>
>;
list_t list;
public:
void add_request(ClientRequest &request) {
assert(!request.ordering_hook.is_linked());
intrusive_ptr_add_ref(&request);
list.push_back(request);
}
void remove_request(ClientRequest &request) {
assert(request.ordering_hook.is_linked());
list.erase(list_t::s_iterator_to(request));
intrusive_ptr_release(&request);
}
void requeue(ShardServices &shard_services, Ref<PG> pg);
void clear_and_cancel();
};
void complete_request();
static constexpr OperationTypeCode type = OperationTypeCode::client_request;
ClientRequest(
ShardServices &shard_services,
crimson::net::ConnectionRef, Ref<MOSDOp> &&m);
~ClientRequest();
void print(std::ostream &) const final;
void dump_detail(Formatter *f) const final;
static constexpr bool can_create() { return false; }
spg_t get_pgid() const {
return m->get_spg();
}
PipelineHandle &get_handle() { return instance_handle->handle; }
epoch_t get_epoch() const { return m->get_min_epoch(); }
ConnectionPipeline &get_connection_pipeline();
seastar::future<crimson::net::ConnectionFRef> prepare_remote_submission() {
assert(conn);
return conn.get_foreign(
).then([this](auto f_conn) {
conn.reset();
return f_conn;
});
}
void finish_remote_submission(crimson::net::ConnectionFRef _conn) {
assert(!conn);
conn = make_local_shared_foreign(std::move(_conn));
}
seastar::future<> with_pg_int(
ShardServices &shard_services, Ref<PG> pg);
public:
seastar::future<> with_pg(
ShardServices &shard_services, Ref<PG> pgref);
private:
template <typename FuncT>
interruptible_future<> with_sequencer(FuncT&& func);
auto reply_op_error(const Ref<PG>& pg, int err);
interruptible_future<> do_process(
instance_handle_t &ihref,
Ref<PG>& pg,
crimson::osd::ObjectContextRef obc);
::crimson::interruptible::interruptible_future<
::crimson::osd::IOInterruptCondition> process_pg_op(
Ref<PG> &pg);
::crimson::interruptible::interruptible_future<
::crimson::osd::IOInterruptCondition> process_op(
instance_handle_t &ihref,
Ref<PG> &pg);
bool is_pg_op() const;
PGPipeline &client_pp(PG &pg);
template <typename Errorator>
using interruptible_errorator =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
Errorator>;
bool is_misdirected(const PG& pg) const;
const SnapContext get_snapc(
Ref<PG>& pg,
crimson::osd::ObjectContextRef obc) const;
public:
friend class LttngBackend;
friend class HistoricBackend;
auto get_started() const {
return get_event<StartEvent>().get_timestamp();
};
auto get_completed() const {
return get_event<CompletionEvent>().get_timestamp();
};
void put_historic() const;
};
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::osd::ClientRequest> : fmt::ostream_formatter {};
#endif
| 9,103 | 31.283688 | 91 | h |
null | ceph-main/src/crimson/osd/osd_operations/client_request_common.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/common/operation.h"
#include "crimson/common/type_helpers.h"
#include "crimson/osd/osd_operation.h"
namespace crimson::osd {
struct CommonClientRequest {
static InterruptibleOperation::template interruptible_future<>
do_recover_missing(Ref<PG>& pg, const hobject_t& soid);
static bool should_abort_request(
const crimson::Operation& op, std::exception_ptr eptr);
};
} // namespace crimson::osd
| 539 | 24.714286 | 70 | h |
null | ceph-main/src/crimson/osd/osd_operations/internal_client_request.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/common/type_helpers.h"
#include "crimson/osd/osd_operation.h"
#include "crimson/osd/osd_operations/client_request_common.h"
#include "crimson/osd/osd_operations/common/pg_pipeline.h"
#include "crimson/osd/pg.h"
#include "crimson/osd/pg_activation_blocker.h"
namespace crimson::osd {
class InternalClientRequest : public PhasedOperationT<InternalClientRequest>,
private CommonClientRequest {
public:
explicit InternalClientRequest(Ref<PG> pg);
~InternalClientRequest();
  // imposed by `ShardServices::start_operation<T>(...)`.
seastar::future<> start();
protected:
virtual const hobject_t& get_target_oid() const = 0;
virtual PG::do_osd_ops_params_t get_do_osd_ops_params() const = 0;
virtual std::vector<OSDOp> create_osd_ops() = 0;
const PG& get_pg() const {
return *pg;
}
private:
friend OperationT<InternalClientRequest>;
static constexpr OperationTypeCode type =
OperationTypeCode::internal_client_request;
void print(std::ostream &) const final;
void dump_detail(Formatter *f) const final;
CommonPGPipeline& client_pp();
seastar::future<> do_process();
Ref<PG> pg;
OpInfo op_info;
PipelineHandle handle;
public:
PipelineHandle& get_handle() { return handle; }
std::tuple<
StartEvent,
CommonPGPipeline::WaitForActive::BlockingEvent,
PGActivationBlocker::BlockingEvent,
CommonPGPipeline::RecoverMissing::BlockingEvent,
CommonPGPipeline::GetOBC::BlockingEvent,
CommonPGPipeline::Process::BlockingEvent,
CompletionEvent
> tracking_events;
};
} // namespace crimson::osd
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::osd::InternalClientRequest> : fmt::ostream_formatter {};
#endif
| 1,855 | 25.898551 | 99 | h |
null | ceph-main/src/crimson/osd/osd_operations/logmissing_request.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/net/Connection.h"
#include "crimson/osd/osdmap_gate.h"
#include "crimson/osd/osd_operation.h"
#include "crimson/osd/osd_operations/client_request.h"
#include "crimson/osd/pg_map.h"
#include "crimson/common/type_helpers.h"
#include "messages/MOSDPGUpdateLogMissing.h"
namespace ceph {
class Formatter;
}
namespace crimson::osd {
class ShardServices;
class OSD;
class PG;
class LogMissingRequest final : public PhasedOperationT<LogMissingRequest> {
public:
static constexpr OperationTypeCode type = OperationTypeCode::logmissing_request;
LogMissingRequest(crimson::net::ConnectionRef&&, Ref<MOSDPGUpdateLogMissing>&&);
void print(std::ostream &) const final;
void dump_detail(ceph::Formatter* f) const final;
static constexpr bool can_create() { return false; }
spg_t get_pgid() const {
return req->get_spg();
}
PipelineHandle &get_handle() { return handle; }
epoch_t get_epoch() const { return req->get_min_epoch(); }
ConnectionPipeline &get_connection_pipeline();
seastar::future<crimson::net::ConnectionFRef> prepare_remote_submission() {
assert(conn);
return conn.get_foreign(
).then([this](auto f_conn) {
conn.reset();
return f_conn;
});
}
void finish_remote_submission(crimson::net::ConnectionFRef _conn) {
assert(!conn);
conn = make_local_shared_foreign(std::move(_conn));
}
seastar::future<> with_pg(
ShardServices &shard_services, Ref<PG> pg);
std::tuple<
StartEvent,
ConnectionPipeline::AwaitActive::BlockingEvent,
ConnectionPipeline::AwaitMap::BlockingEvent,
ConnectionPipeline::GetPG::BlockingEvent,
PGMap::PGCreationBlockingEvent,
OSD_OSDMapGate::OSDMapBlocker::BlockingEvent
> tracking_events;
private:
ClientRequest::PGPipeline &client_pp(PG &pg);
crimson::net::ConnectionRef conn;
// must be after `conn` to ensure the ConnectionPipeline's is alive
PipelineHandle handle;
Ref<MOSDPGUpdateLogMissing> req;
};
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::osd::LogMissingRequest> : fmt::ostream_formatter {};
#endif
| 2,211 | 26.65 | 95 | h |
null | ceph-main/src/crimson/osd/osd_operations/logmissing_request_reply.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/net/Connection.h"
#include "crimson/osd/osdmap_gate.h"
#include "crimson/osd/osd_operation.h"
#include "crimson/osd/osd_operations/client_request.h"
#include "crimson/osd/pg_map.h"
#include "crimson/common/type_helpers.h"
#include "messages/MOSDPGUpdateLogMissingReply.h"
namespace ceph {
class Formatter;
}
namespace crimson::osd {
class ShardServices;
class OSD;
class PG;
class LogMissingRequestReply final : public PhasedOperationT<LogMissingRequestReply> {
public:
static constexpr OperationTypeCode type = OperationTypeCode::logmissing_request_reply;
LogMissingRequestReply(crimson::net::ConnectionRef&&, Ref<MOSDPGUpdateLogMissingReply>&&);
void print(std::ostream &) const final;
void dump_detail(ceph::Formatter* f) const final;
static constexpr bool can_create() { return false; }
spg_t get_pgid() const {
return req->get_spg();
}
PipelineHandle &get_handle() { return handle; }
epoch_t get_epoch() const { return req->get_min_epoch(); }
ConnectionPipeline &get_connection_pipeline();
seastar::future<crimson::net::ConnectionFRef> prepare_remote_submission() {
assert(conn);
return conn.get_foreign(
).then([this](auto f_conn) {
conn.reset();
return f_conn;
});
}
void finish_remote_submission(crimson::net::ConnectionFRef _conn) {
assert(!conn);
conn = make_local_shared_foreign(std::move(_conn));
}
seastar::future<> with_pg(
ShardServices &shard_services, Ref<PG> pg);
std::tuple<
StartEvent,
ConnectionPipeline::AwaitActive::BlockingEvent,
ConnectionPipeline::AwaitMap::BlockingEvent,
ConnectionPipeline::GetPG::BlockingEvent,
PGMap::PGCreationBlockingEvent,
OSD_OSDMapGate::OSDMapBlocker::BlockingEvent
> tracking_events;
private:
ClientRequest::PGPipeline &client_pp(PG &pg);
crimson::net::ConnectionRef conn;
// must be after `conn` to ensure the ConnectionPipeline's is alive
PipelineHandle handle;
Ref<MOSDPGUpdateLogMissingReply> req;
};
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::osd::LogMissingRequestReply> : fmt::ostream_formatter {};
#endif
| 2,252 | 27.1625 | 100 | h |
null | ceph-main/src/crimson/osd/osd_operations/osdop_params.h | #pragma once
#include "messages/MOSDOp.h"
#include "osd/osd_types.h"
#include "crimson/common/type_helpers.h"
// The fields in this struct are parameters that may be needed in multiple
// level of processing. I inclosed all those parameters in this struct to
// avoid passing each of them as a method parameter.
struct osd_op_params_t {
osd_reqid_t req_id;
utime_t mtime;
eversion_t at_version;
eversion_t pg_trim_to;
eversion_t min_last_complete_ondisk;
eversion_t last_complete;
version_t user_at_version = 0;
bool user_modify = false;
ObjectCleanRegions clean_regions;
osd_op_params_t() = default;
};
| 627 | 26.304348 | 74 | h |
null | ceph-main/src/crimson/osd/osd_operations/peering_event.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <seastar/core/future.hh>
#include "crimson/osd/osdmap_gate.h"
#include "crimson/osd/osd_operation.h"
#include "osd/osd_types.h"
#include "osd/PGPeeringEvent.h"
#include "osd/PeeringState.h"
namespace ceph {
class Formatter;
}
namespace crimson::osd {
class OSD;
class ShardServices;
class PG;
class BackfillRecovery;
class PGPeeringPipeline {
struct AwaitMap : OrderedExclusivePhaseT<AwaitMap> {
static constexpr auto type_name = "PeeringEvent::PGPipeline::await_map";
} await_map;
struct Process : OrderedExclusivePhaseT<Process> {
static constexpr auto type_name = "PeeringEvent::PGPipeline::process";
} process;
template <class T>
friend class PeeringEvent;
friend class LocalPeeringEvent;
friend class RemotePeeringEvent;
friend class PGAdvanceMap;
friend class BackfillRecovery;
};
template <class T>
class PeeringEvent : public PhasedOperationT<T> {
T* that() {
return static_cast<T*>(this);
}
const T* that() const {
return static_cast<const T*>(this);
}
public:
static constexpr OperationTypeCode type = OperationTypeCode::peering_event;
protected:
PGPeeringPipeline &peering_pp(PG &pg);
PeeringCtx ctx;
pg_shard_t from;
spg_t pgid;
float delay = 0;
PGPeeringEvent evt;
const pg_shard_t get_from() const {
return from;
}
const spg_t get_pgid() const {
return pgid;
}
const PGPeeringEvent &get_event() const {
return evt;
}
virtual void on_pg_absent(ShardServices &);
virtual typename PeeringEvent::template interruptible_future<>
complete_rctx(ShardServices &, Ref<PG>);
virtual seastar::future<> complete_rctx_no_pg(
ShardServices &shard_services
) { return seastar::now();}
public:
template <typename... Args>
PeeringEvent(
const pg_shard_t &from, const spg_t &pgid,
Args&&... args) :
from(from),
pgid(pgid),
evt(std::forward<Args>(args)...)
{}
template <typename... Args>
PeeringEvent(
const pg_shard_t &from, const spg_t &pgid,
float delay, Args&&... args) :
from(from),
pgid(pgid),
delay(delay),
evt(std::forward<Args>(args)...)
{}
void print(std::ostream &) const final;
void dump_detail(ceph::Formatter* f) const final;
seastar::future<> with_pg(
ShardServices &shard_services, Ref<PG> pg);
};
class RemotePeeringEvent : public PeeringEvent<RemotePeeringEvent> {
protected:
crimson::net::ConnectionRef conn;
  // must be after conn due to the ConnectionPipeline's lifetime
PipelineHandle handle;
void on_pg_absent(ShardServices &) final;
PeeringEvent::interruptible_future<> complete_rctx(
ShardServices &shard_services,
Ref<PG> pg) override;
seastar::future<> complete_rctx_no_pg(
ShardServices &shard_services
) override;
public:
class OSDPipeline {
struct AwaitActive : OrderedExclusivePhaseT<AwaitActive> {
static constexpr auto type_name =
"PeeringRequest::OSDPipeline::await_active";
} await_active;
friend class RemotePeeringEvent;
};
template <typename... Args>
RemotePeeringEvent(crimson::net::ConnectionRef conn, Args&&... args) :
PeeringEvent(std::forward<Args>(args)...),
conn(conn)
{}
std::tuple<
StartEvent,
ConnectionPipeline::AwaitActive::BlockingEvent,
ConnectionPipeline::AwaitMap::BlockingEvent,
OSD_OSDMapGate::OSDMapBlocker::BlockingEvent,
ConnectionPipeline::GetPG::BlockingEvent,
PGMap::PGCreationBlockingEvent,
PGPeeringPipeline::AwaitMap::BlockingEvent,
PG_OSDMapGate::OSDMapBlocker::BlockingEvent,
PGPeeringPipeline::Process::BlockingEvent,
OSDPipeline::AwaitActive::BlockingEvent,
CompletionEvent
> tracking_events;
static constexpr bool can_create() { return true; }
auto get_create_info() { return std::move(evt.create_info); }
spg_t get_pgid() const {
return pgid;
}
PipelineHandle &get_handle() { return handle; }
epoch_t get_epoch() const { return evt.get_epoch_sent(); }
ConnectionPipeline &get_connection_pipeline();
seastar::future<crimson::net::ConnectionFRef> prepare_remote_submission() {
assert(conn);
return conn.get_foreign(
).then([this](auto f_conn) {
conn.reset();
return f_conn;
});
}
void finish_remote_submission(crimson::net::ConnectionFRef _conn) {
assert(!conn);
conn = make_local_shared_foreign(std::move(_conn));
}
};
class LocalPeeringEvent final : public PeeringEvent<LocalPeeringEvent> {
protected:
Ref<PG> pg;
PipelineHandle handle;
public:
template <typename... Args>
LocalPeeringEvent(Ref<PG> pg, Args&&... args) :
PeeringEvent(std::forward<Args>(args)...),
pg(pg)
{}
seastar::future<> start();
virtual ~LocalPeeringEvent();
PipelineHandle &get_handle() { return handle; }
std::tuple<
StartEvent,
PGPeeringPipeline::AwaitMap::BlockingEvent,
PG_OSDMapGate::OSDMapBlocker::BlockingEvent,
PGPeeringPipeline::Process::BlockingEvent,
CompletionEvent
> tracking_events;
};
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::osd::LocalPeeringEvent> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::osd::RemotePeeringEvent> : fmt::ostream_formatter {};
template <class T> struct fmt::formatter<crimson::osd::PeeringEvent<T>> : fmt::ostream_formatter {};
#endif
| 5,450 | 25.206731 | 100 | h |
null | ceph-main/src/crimson/osd/osd_operations/pg_advance_map.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <seastar/core/future.hh>
#include "crimson/osd/osd_operation.h"
#include "crimson/osd/osd_operations/peering_event.h"
#include "osd/osd_types.h"
#include "crimson/common/type_helpers.h"
namespace ceph {
class Formatter;
}
namespace crimson::osd {
class ShardServices;
class PG;
class PGAdvanceMap : public PhasedOperationT<PGAdvanceMap> {
public:
static constexpr OperationTypeCode type = OperationTypeCode::pg_advance_map;
protected:
ShardServices &shard_services;
Ref<PG> pg;
PipelineHandle handle;
std::optional<epoch_t> from;
epoch_t to;
PeeringCtx rctx;
const bool do_init;
public:
PGAdvanceMap(
ShardServices &shard_services, Ref<PG> pg, epoch_t to,
PeeringCtx &&rctx, bool do_init);
~PGAdvanceMap();
void print(std::ostream &) const final;
void dump_detail(ceph::Formatter *f) const final;
seastar::future<> start();
PipelineHandle &get_handle() { return handle; }
std::tuple<
PGPeeringPipeline::Process::BlockingEvent
> tracking_events;
private:
PGPeeringPipeline &peering_pp(PG &pg);
};
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::osd::PGAdvanceMap> : fmt::ostream_formatter {};
#endif
| 1,325 | 20.387097 | 90 | h |
null | ceph-main/src/crimson/osd/osd_operations/recovery_subrequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "osd/osd_op_util.h"
#include "crimson/net/Connection.h"
#include "crimson/osd/osd_operation.h"
#include "crimson/osd/pg.h"
#include "crimson/common/type_helpers.h"
#include "messages/MOSDFastDispatchOp.h"
namespace crimson::osd {
class PG;
class RecoverySubRequest final : public PhasedOperationT<RecoverySubRequest> {
public:
static constexpr OperationTypeCode type =
OperationTypeCode::background_recovery_sub;
RecoverySubRequest(
crimson::net::ConnectionRef conn,
Ref<MOSDFastDispatchOp>&& m)
: conn(conn), m(m) {}
void print(std::ostream& out) const final
{
out << *m;
}
void dump_detail(Formatter *f) const final
{
}
static constexpr bool can_create() { return false; }
spg_t get_pgid() const {
return m->get_spg();
}
PipelineHandle &get_handle() { return handle; }
epoch_t get_epoch() const { return m->get_min_epoch(); }
ConnectionPipeline &get_connection_pipeline();
seastar::future<crimson::net::ConnectionFRef> prepare_remote_submission() {
assert(conn);
return conn.get_foreign(
).then([this](auto f_conn) {
conn.reset();
return f_conn;
});
}
void finish_remote_submission(crimson::net::ConnectionFRef _conn) {
assert(!conn);
conn = make_local_shared_foreign(std::move(_conn));
}
seastar::future<> with_pg(
ShardServices &shard_services, Ref<PG> pg);
std::tuple<
StartEvent,
ConnectionPipeline::AwaitActive::BlockingEvent,
ConnectionPipeline::AwaitMap::BlockingEvent,
ConnectionPipeline::GetPG::BlockingEvent,
PGMap::PGCreationBlockingEvent,
OSD_OSDMapGate::OSDMapBlocker::BlockingEvent,
CompletionEvent
> tracking_events;
private:
crimson::net::ConnectionRef conn;
  // must be after `conn` to ensure the ConnectionPipeline is still alive
PipelineHandle handle;
Ref<MOSDFastDispatchOp> m;
};
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::osd::RecoverySubRequest> : fmt::ostream_formatter {};
#endif
| 2,106 | 24.695122 | 96 | h |
null | ceph-main/src/crimson/osd/osd_operations/replicated_request.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/net/Connection.h"
#include "crimson/osd/osdmap_gate.h"
#include "crimson/osd/osd_operation.h"
#include "crimson/osd/pg_map.h"
#include "crimson/osd/osd_operations/client_request.h"
#include "crimson/common/type_helpers.h"
#include "messages/MOSDRepOp.h"
namespace ceph {
class Formatter;
}
namespace crimson::osd {
class ShardServices;
class OSD;
class PG;
class RepRequest final : public PhasedOperationT<RepRequest> {
public:
static constexpr OperationTypeCode type = OperationTypeCode::replicated_request;
RepRequest(crimson::net::ConnectionRef&&, Ref<MOSDRepOp>&&);
void print(std::ostream &) const final;
void dump_detail(ceph::Formatter* f) const final;
static constexpr bool can_create() { return false; }
spg_t get_pgid() const {
return req->get_spg();
}
PipelineHandle &get_handle() { return handle; }
epoch_t get_epoch() const { return req->get_min_epoch(); }
ConnectionPipeline &get_connection_pipeline();
seastar::future<crimson::net::ConnectionFRef> prepare_remote_submission() {
assert(conn);
return conn.get_foreign(
).then([this](auto f_conn) {
conn.reset();
return f_conn;
});
}
void finish_remote_submission(crimson::net::ConnectionFRef _conn) {
assert(!conn);
conn = make_local_shared_foreign(std::move(_conn));
}
seastar::future<> with_pg(
ShardServices &shard_services, Ref<PG> pg);
std::tuple<
StartEvent,
ConnectionPipeline::AwaitActive::BlockingEvent,
ConnectionPipeline::AwaitMap::BlockingEvent,
ConnectionPipeline::GetPG::BlockingEvent,
ClientRequest::PGPipeline::AwaitMap::BlockingEvent,
PG_OSDMapGate::OSDMapBlocker::BlockingEvent,
PGMap::PGCreationBlockingEvent,
OSD_OSDMapGate::OSDMapBlocker::BlockingEvent
> tracking_events;
private:
ClientRequest::PGPipeline &client_pp(PG &pg);
crimson::net::ConnectionRef conn;
PipelineHandle handle;
Ref<MOSDRepOp> req;
};
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::osd::RepRequest> : fmt::ostream_formatter {};
#endif
| 2,179 | 25.91358 | 88 | h |
null | ceph-main/src/crimson/osd/osd_operations/snaptrim_event.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <seastar/core/future.hh>
#include "crimson/osd/osdmap_gate.h"
#include "crimson/osd/osd_operation.h"
#include "crimson/osd/osd_operations/common/pg_pipeline.h"
#include "crimson/osd/pg.h"
#include "crimson/osd/pg_activation_blocker.h"
#include "osd/osd_types.h"
#include "osd/PGPeeringEvent.h"
#include "osd/PeeringState.h"
namespace ceph {
class Formatter;
}
class SnapMapper;
namespace crimson::osd {
class OSD;
class ShardServices;
class PG;
// trim up to `max` objects for snapshot `snapid`
class SnapTrimEvent final : public PhasedOperationT<SnapTrimEvent> {
public:
using remove_or_update_ertr =
crimson::errorator<crimson::ct_error::enoent>;
using remove_or_update_iertr =
crimson::interruptible::interruptible_errorator<
IOInterruptCondition, remove_or_update_ertr>;
using snap_trim_ertr = remove_or_update_ertr::extend<
crimson::ct_error::eagain>;
using snap_trim_iertr = remove_or_update_iertr::extend<
crimson::ct_error::eagain>;
static constexpr OperationTypeCode type = OperationTypeCode::snaptrim_event;
SnapTrimEvent(Ref<PG> pg,
SnapMapper& snap_mapper,
const snapid_t snapid,
const bool needs_pause)
: pg(std::move(pg)),
snap_mapper(snap_mapper),
snapid(snapid),
needs_pause(needs_pause) {}
void print(std::ostream &) const final;
void dump_detail(ceph::Formatter* f) const final;
snap_trim_ertr::future<seastar::stop_iteration> start();
snap_trim_ertr::future<seastar::stop_iteration> with_pg(
ShardServices &shard_services, Ref<PG> pg);
private:
CommonPGPipeline& client_pp();
  // based on 998cb8c141bb89aafae298a9d5e130fbd78fe5f2
struct SubOpBlocker : crimson::BlockerT<SubOpBlocker> {
static constexpr const char* type_name = "CompoundOpBlocker";
using id_done_t = std::pair<crimson::Operation::id_t,
remove_or_update_iertr::future<>>;
void dump_detail(Formatter *f) const final;
template <class... Args>
void emplace_back(Args&&... args);
remove_or_update_iertr::future<> wait_completion();
private:
std::vector<id_done_t> subops;
} subop_blocker;
// we don't need to synchronize with other instances of SnapTrimEvent;
// it's here for the sake of op tracking.
struct WaitSubop : OrderedConcurrentPhaseT<WaitSubop> {
static constexpr auto type_name = "SnapTrimEvent::wait_subop";
} wait_subop;
  // an instantiator can instruct us to enter this stage and then
  // wait on the returned future to implement throttling. It is
  // implemented that way for the sake of tracking ops.
struct WaitTrimTimer : OrderedExclusivePhaseT<WaitTrimTimer> {
static constexpr auto type_name = "SnapTrimEvent::wait_trim_timer";
} wait_trim_timer;
PipelineHandle handle;
Ref<PG> pg;
SnapMapper& snap_mapper;
const snapid_t snapid;
const bool needs_pause;
public:
PipelineHandle& get_handle() { return handle; }
std::tuple<
StartEvent,
CommonPGPipeline::WaitForActive::BlockingEvent,
PGActivationBlocker::BlockingEvent,
CommonPGPipeline::RecoverMissing::BlockingEvent,
CommonPGPipeline::GetOBC::BlockingEvent,
CommonPGPipeline::Process::BlockingEvent,
WaitSubop::BlockingEvent,
WaitTrimTimer::BlockingEvent,
CompletionEvent
> tracking_events;
};
// Removes a single object. A SnapTrimEvent can create multiple subrequests.
// The division of labour is needed because of the restriction that an Op
// cannot revisit a pipeline stage it has already seen.
class SnapTrimObjSubEvent : public PhasedOperationT<SnapTrimObjSubEvent> {
public:
using remove_or_update_ertr =
crimson::errorator<crimson::ct_error::enoent>;
using remove_or_update_iertr =
crimson::interruptible::interruptible_errorator<
IOInterruptCondition, remove_or_update_ertr>;
static constexpr OperationTypeCode type =
OperationTypeCode::snaptrimobj_subevent;
SnapTrimObjSubEvent(
Ref<PG> pg,
const hobject_t& coid,
snapid_t snap_to_trim)
: pg(std::move(pg)),
coid(coid),
snap_to_trim(snap_to_trim) {
}
void print(std::ostream &) const final;
void dump_detail(ceph::Formatter* f) const final;
remove_or_update_iertr::future<> start();
remove_or_update_iertr::future<> with_pg(
ShardServices &shard_services, Ref<PG> pg);
CommonPGPipeline& client_pp();
private:
object_stat_sum_t delta_stats;
remove_or_update_iertr::future<> remove_clone(
ObjectContextRef obc,
ObjectContextRef head_obc,
ceph::os::Transaction& txn,
std::vector<pg_log_entry_t>& log_entries);
void remove_head_whiteout(
ObjectContextRef obc,
ObjectContextRef head_obc,
ceph::os::Transaction& txn,
std::vector<pg_log_entry_t>& log_entries);
interruptible_future<> adjust_snaps(
ObjectContextRef obc,
ObjectContextRef head_obc,
const std::set<snapid_t>& new_snaps,
ceph::os::Transaction& txn,
std::vector<pg_log_entry_t>& log_entries);
void update_head(
ObjectContextRef obc,
ObjectContextRef head_obc,
ceph::os::Transaction& txn,
std::vector<pg_log_entry_t>& log_entries);
using remove_or_update_ret_t =
std::pair<ceph::os::Transaction, std::vector<pg_log_entry_t>>;
remove_or_update_iertr::future<remove_or_update_ret_t>
remove_or_update(ObjectContextRef obc, ObjectContextRef head_obc);
// we don't need to synchronize with other instances started by
// SnapTrimEvent; it's here for the sake of op tracking.
struct WaitRepop : OrderedConcurrentPhaseT<WaitRepop> {
static constexpr auto type_name = "SnapTrimObjSubEvent::wait_repop";
} wait_repop;
Ref<PG> pg;
PipelineHandle handle;
osd_op_params_t osd_op_p;
const hobject_t coid;
const snapid_t snap_to_trim;
public:
PipelineHandle& get_handle() { return handle; }
std::tuple<
StartEvent,
CommonPGPipeline::WaitForActive::BlockingEvent,
PGActivationBlocker::BlockingEvent,
CommonPGPipeline::RecoverMissing::BlockingEvent,
CommonPGPipeline::GetOBC::BlockingEvent,
CommonPGPipeline::Process::BlockingEvent,
WaitRepop::BlockingEvent,
CompletionEvent
> tracking_events;
};
} // namespace crimson::osd
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::osd::SnapTrimEvent> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::osd::SnapTrimObjSubEvent> : fmt::ostream_formatter {};
#endif
| 6,538 | 30.287081 | 97 | h |
null | ceph-main/src/crimson/osd/osd_operations/common/pg_pipeline.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "osd/osd_op_util.h"
#include "crimson/osd/osd_operation.h"
namespace crimson::osd {
class CommonPGPipeline {
protected:
friend class InternalClientRequest;
friend class SnapTrimEvent;
friend class SnapTrimObjSubEvent;
struct WaitForActive : OrderedExclusivePhaseT<WaitForActive> {
static constexpr auto type_name = "CommonPGPipeline:::wait_for_active";
} wait_for_active;
struct RecoverMissing : OrderedExclusivePhaseT<RecoverMissing> {
static constexpr auto type_name = "CommonPGPipeline::recover_missing";
} recover_missing;
struct GetOBC : OrderedExclusivePhaseT<GetOBC> {
static constexpr auto type_name = "CommonPGPipeline::get_obc";
} get_obc;
struct Process : OrderedExclusivePhaseT<Process> {
static constexpr auto type_name = "CommonPGPipeline::process";
} process;
};
} // namespace crimson::osd
| 970 | 29.34375 | 75 | h |
null | ceph-main/src/crimson/osd/scheduler/mclock_scheduler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Red Hat Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <ostream>
#include <map>
#include <vector>
#include "boost/variant.hpp"
#include "dmclock/src/dmclock_server.h"
#include "crimson/osd/scheduler/scheduler.h"
#include "common/config.h"
#include "common/ceph_context.h"
namespace crimson::osd::scheduler {
using client_id_t = uint64_t;
using profile_id_t = uint64_t;
struct client_profile_id_t {
client_id_t client_id;
profile_id_t profile_id;
auto operator<=>(const client_profile_id_t&) const = default;
};
struct scheduler_id_t {
scheduler_class_t class_id;
client_profile_id_t client_profile_id;
auto operator<=>(const scheduler_id_t&) const = default;
};
/**
* Scheduler implementation based on mclock.
*
* TODO: explain configs
*/
class mClockScheduler : public Scheduler, md_config_obs_t {
class ClientRegistry {
std::array<
crimson::dmclock::ClientInfo,
static_cast<size_t>(scheduler_class_t::client)
> internal_client_infos = {
// Placeholder, gets replaced with configured values
crimson::dmclock::ClientInfo(1, 1, 1),
crimson::dmclock::ClientInfo(1, 1, 1)
};
crimson::dmclock::ClientInfo default_external_client_info = {1, 1, 1};
std::map<client_profile_id_t,
crimson::dmclock::ClientInfo> external_client_infos;
const crimson::dmclock::ClientInfo *get_external_client(
const client_profile_id_t &client) const;
public:
void update_from_config(const ConfigProxy &conf);
const crimson::dmclock::ClientInfo *get_info(
const scheduler_id_t &id) const;
} client_registry;
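  // Note (added commentary, not in the original header): each dmclock
  // ClientInfo above is a (reservation, weight, limit) triple; the
  // {1, 1, 1} entries are placeholders until update_from_config()
  // replaces them with values taken from the Ceph configuration.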
using mclock_queue_t = crimson::dmclock::PullPriorityQueue<
scheduler_id_t,
item_t,
true,
true,
2>;
mclock_queue_t scheduler;
std::list<item_t> immediate;
static scheduler_id_t get_scheduler_id(const item_t &item) {
return scheduler_id_t{
item.params.klass,
client_profile_id_t{
item.params.owner,
0
}
};
}
public:
mClockScheduler(ConfigProxy &conf);
// Enqueue op in the back of the regular queue
void enqueue(item_t &&item) final;
// Enqueue the op in the front of the regular queue
void enqueue_front(item_t &&item) final;
  // Return an op to be dispatched
item_t dequeue() final;
  // Returns true if the queue is empty
bool empty() const final {
return immediate.empty() && scheduler.empty();
}
// Formatted output of the queue
void dump(ceph::Formatter &f) const final;
void print(std::ostream &ostream) const final {
ostream << "mClockScheduler";
}
const char** get_tracked_conf_keys() const final;
void handle_conf_change(const ConfigProxy& conf,
const std::set<std::string> &changed) final;
};
}
| 3,071 | 23.380952 | 74 | h |
null | ceph-main/src/crimson/osd/scheduler/scheduler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <seastar/core/future.hh>
#include <ostream>
#include "crimson/common/config_proxy.h"
namespace crimson::osd::scheduler {
enum class scheduler_class_t : uint8_t {
background_best_effort = 0,
background_recovery,
client,
repop,
immediate,
};
std::ostream &operator<<(std::ostream &, const scheduler_class_t &);
using client_t = uint64_t;
using cost_t = uint64_t;
struct params_t {
cost_t cost = 1;
client_t owner;
scheduler_class_t klass;
};
struct item_t {
params_t params;
seastar::promise<> wake;
};
/**
* Base interface for classes responsible for choosing
* op processing order in the OSD.
*/
class Scheduler {
public:
// Enqueue op for scheduling
virtual void enqueue(item_t &&item) = 0;
// Enqueue op for processing as though it were enqueued prior
// to other items already scheduled.
virtual void enqueue_front(item_t &&item) = 0;
// Returns true iff there are no ops scheduled
virtual bool empty() const = 0;
// Return next op to be processed
virtual item_t dequeue() = 0;
// Dump formatted representation for the queue
virtual void dump(ceph::Formatter &f) const = 0;
  // Print a brief human-readable description with relevant parameters
virtual void print(std::ostream &out) const = 0;
// Destructor
virtual ~Scheduler() {};
};
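/*
 * Usage sketch (illustrative only, not part of the original header):
 * a submitter fills in item_t::params, keeps the future tied to
 * item_t::wake, and a dispatcher drains the queue, waking each op as it
 * is dequeued. `sched`, `owner` and `do_dispatch` are hypothetical names.
 *
 *   item_t item;
 *   item.params = params_t{1, owner, scheduler_class_t::client};
 *   auto submitted = item.wake.get_future().then([] { do_dispatch(); });
 *   sched.enqueue(std::move(item));
 *   // ... later, in the dispatch loop:
 *   while (!sched.empty()) {
 *     sched.dequeue().wake.set_value();
 *   }
 */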
std::ostream &operator<<(std::ostream &lhs, const Scheduler &);
using SchedulerRef = std::unique_ptr<Scheduler>;
SchedulerRef make_scheduler(ConfigProxy &);
}
| 1,891 | 21.795181 | 70 | h |
null | ceph-main/src/crimson/tools/store_nbd/block_driver.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/program_options/variables_map.hpp>
#include <boost/program_options/parsers.hpp>
#include <seastar/core/future.hh>
#include <string>
#include <optional>
#include "include/buffer.h"
/**
* BlockDriver
*
 * Simple interface to enable throughput tests comparing raw disk to
 * transaction_manager, etc.
*/
class BlockDriver {
public:
struct config_t {
std::string type;
bool mkfs = false;
unsigned num_pgs = 128;
unsigned log_size = 1000;
unsigned object_size = 4<<20 /* 4MB, rbd default */;
unsigned oi_size = 1<<9 /* 512b */;
unsigned log_entry_size = 1<<9 /* 512b */;
bool prepopulate_log = false;
std::optional<std::string> path;
bool is_futurized_store() const {
return type == "seastore" || type == "bluestore";
}
std::string get_fs_type() const {
ceph_assert(is_futurized_store());
return type;
}
bool oi_enabled() const {
return oi_size > 0;
}
bool log_enabled() const {
return log_entry_size > 0 && log_size > 0;
}
bool prepopulate_log_enabled() const {
return prepopulate_log;
}
void populate_options(
boost::program_options::options_description &desc)
{
namespace po = boost::program_options;
desc.add_options()
("type",
po::value<std::string>()
->default_value("transaction_manager")
->notifier([this](auto s) { type = s; }),
"Backend to use, options are transaction_manager, seastore"
)
("device-path",
po::value<std::string>()
->required()
->notifier([this](auto s) { path = s; }),
"Path to device for backend"
)
("num-pgs",
po::value<unsigned>()
->notifier([this](auto s) { num_pgs = s; }),
"Number of pgs to use for futurized_store backends"
)
("log-size",
po::value<unsigned>()
->notifier([this](auto s) { log_size = s; }),
"Number of log entries per pg to use for futurized_store backends"
", 0 to disable"
)
("log-entry-size",
po::value<unsigned>()
->notifier([this](auto s) { log_entry_size = s; }),
"Size of each log entry per pg to use for futurized_store backends"
", 0 to disable"
)
("prepopulate-log",
po::value<bool>()
->notifier([this](auto s) { prepopulate_log = s; }),
"Prepopulate log on mount"
)
("object-info-size",
po::value<unsigned>()
     ->notifier([this](auto s) { oi_size = s; }),
     "Size of the object info to use for futurized_store backends"
     ", 0 to disable"
)
("object-size",
po::value<unsigned>()
->notifier([this](auto s) { object_size = s; }),
"Object size to use for futurized_store backends"
)
("mkfs",
po::value<bool>()
->default_value(false)
->notifier([this](auto s) { mkfs = s; }),
"Do mkfs first"
);
}
};
virtual ceph::bufferptr get_buffer(size_t size) = 0;
virtual seastar::future<> write(
off_t offset,
ceph::bufferptr ptr) = 0;
virtual seastar::future<ceph::bufferlist> read(
off_t offset,
size_t size) = 0;
virtual size_t get_size() const = 0;
virtual seastar::future<> mount() = 0;
virtual seastar::future<> close() = 0;
virtual ~BlockDriver() {}
};
using BlockDriverRef = std::unique_ptr<BlockDriver>;
BlockDriverRef get_backend(BlockDriver::config_t config);
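/*
 * Usage sketch (illustrative only, not part of the original header):
 * wiring config_t into boost::program_options and opening a backend.
 * `argc`/`argv` are assumed to come from the caller.
 *
 *   namespace po = boost::program_options;
 *   po::options_description desc("store-nbd backend options");
 *   BlockDriver::config_t config;
 *   config.populate_options(desc);
 *   po::variables_map vm;
 *   po::store(po::parse_command_line(argc, argv, desc), vm);
 *   po::notify(vm);  // runs the ->notifier() hooks that fill `config`
 *   BlockDriverRef driver = get_backend(config);
 *   // mount(), read(), write() and close() all return seastar futures
 *   // that the caller must chain or await.
 */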
| 3,328 | 23.659259 | 70 | h |
null | ceph-main/src/crimson/tools/store_nbd/tm_driver.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "block_driver.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/device.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "test/crimson/seastore/test_block.h"
class TMDriver final : public BlockDriver {
public:
TMDriver(config_t config) : config(config) {}
~TMDriver() final {}
bufferptr get_buffer(size_t size) final {
return ceph::buffer::create_page_aligned(size);
}
seastar::future<> write(
off_t offset,
bufferptr ptr) final;
seastar::future<bufferlist> read(
off_t offset,
size_t size) final;
size_t get_size() const final;
seastar::future<> mount() final;
seastar::future<> close() final;
private:
const config_t config;
using DeviceRef = crimson::os::seastore::DeviceRef;
DeviceRef device;
using TransactionManager = crimson::os::seastore::TransactionManager;
using TransactionManagerRef = crimson::os::seastore::TransactionManagerRef;
TransactionManagerRef tm;
seastar::future<> mkfs();
void init();
void clear();
using read_extents_iertr = TransactionManager::read_extent_iertr;
using read_extents_ret = read_extents_iertr::future<
crimson::os::seastore::lextent_list_t<crimson::os::seastore::TestBlock>
>;
read_extents_ret read_extents(
crimson::os::seastore::Transaction &t,
crimson::os::seastore::laddr_t offset,
crimson::os::seastore::extent_len_t length);
};
| 1,513 | 25.561404 | 77 | h |
null | ceph-main/src/crush/CrushCompiler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CRUSH_COMPILER_H
#define CEPH_CRUSH_COMPILER_H
#include "crush/CrushWrapper.h"
#include "crush/grammar.h"
#include <map>
#include <iostream>
class CrushCompiler {
CrushWrapper& crush;
std::ostream& err;
int verbose;
bool unsafe_tunables;
// decompile
enum dcb_state_t {
DCB_STATE_IN_PROGRESS = 0,
DCB_STATE_DONE
};
int decompile_weight_set_weights(crush_weight_set weight_set,
std::ostream &out);
int decompile_weight_set(crush_weight_set *weight_set,
__u32 size,
std::ostream &out);
int decompile_choose_arg(crush_choose_arg *arg,
int bucket_id,
std::ostream &out);
int decompile_ids(int *ids,
__u32 size,
std::ostream &out);
int decompile_choose_arg_map(crush_choose_arg_map arg_map,
std::ostream &out);
int decompile_choose_args(const std::pair<const long unsigned int, crush_choose_arg_map> &i,
std::ostream &out);
int decompile_bucket_impl(int i, std::ostream &out);
int decompile_bucket(int cur,
std::map<int, dcb_state_t>& dcb_states,
std::ostream &out);
// compile
typedef char const* iterator_t;
typedef boost::spirit::tree_match<iterator_t> parse_tree_match_t;
typedef parse_tree_match_t::tree_iterator iter_t;
typedef parse_tree_match_t::node_t node_t;
std::map<std::string, int> item_id;
std::map<int, std::string> id_item;
std::map<int, unsigned> item_weight;
std::map<std::string, int> type_id;
std::map<std::string, int> rule_id;
std::map<int32_t, std::map<int32_t, int32_t> > class_bucket; // bucket id -> class id -> shadow bucket id
std::string string_node(node_t &node);
int int_node(node_t &node);
float float_node(node_t &node);
int parse_tunable(iter_t const& i);
int parse_device(iter_t const& i);
int parse_bucket_type(iter_t const& i);
int parse_bucket(iter_t const& i);
int parse_rule(iter_t const& i);
int parse_weight_set_weights(iter_t const& i, int bucket_id, crush_weight_set *weight_set);
int parse_weight_set(iter_t const& i, int bucket_id, crush_choose_arg *arg);
int parse_choose_arg_ids(iter_t const& i, int bucket_id, crush_choose_arg *args);
int parse_choose_arg(iter_t const& i, crush_choose_arg *args);
int parse_choose_args(iter_t const& i);
void find_used_bucket_ids(iter_t const& i);
int parse_crush(iter_t const& i);
void dump(iter_t const& i, int ind=1);
std::string consolidate_whitespace(std::string in);
int adjust_bucket_item_place(iter_t const &i);
public:
CrushCompiler(CrushWrapper& c, std::ostream& eo, int verbosity=0)
: crush(c), err(eo), verbose(verbosity),
unsafe_tunables(false) {}
~CrushCompiler() {}
void enable_unsafe_tunables() {
unsafe_tunables = true;
}
int decompile(std::ostream& out);
int compile(std::istream& in, const char *infn=0);
};
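/*
 * Usage sketch (illustrative only, not part of the original header):
 * round-tripping a text crush map through the compiler. The file name
 * is hypothetical.
 *
 *   CrushWrapper crush;
 *   CrushCompiler cc(crush, std::cerr, 0);
 *   std::ifstream in("crushmap.txt");
 *   if (cc.compile(in, "crushmap.txt") == 0) {
 *     cc.decompile(std::cout);  // emit the parsed map back as text
 *   }
 */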
#endif
| 2,949 | 30.72043 | 107 | h |
null | ceph-main/src/crush/CrushLocation.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CRUSH_LOCATION_H
#define CEPH_CRUSH_LOCATION_H
#include <iosfwd>
#include <map>
#include <string>
#if FMT_VERSION >= 90000
#include <fmt/ostream.h>
#endif
#include "common/ceph_mutex.h"
#include "include/common_fwd.h"
namespace ceph::crush {
class CrushLocation {
public:
explicit CrushLocation(CephContext *c) : cct(c) {
init_on_startup();
}
int update_from_conf(); ///< refresh from config
int update_from_hook(); ///< call hook, if present
int init_on_startup();
std::multimap<std::string,std::string> get_location() const;
private:
int _parse(const std::string& s);
CephContext *cct;
std::multimap<std::string,std::string> loc;
mutable ceph::mutex lock = ceph::make_mutex("CrushLocation");
};
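/*
 * Usage sketch (illustrative only, not part of the original header):
 * the location is a set of key=value pairs describing where this daemon
 * sits in the CRUSH hierarchy, e.g. "host=node1 rack=r1 root=default";
 * update_from_conf() re-reads it from configuration and
 * update_from_hook() runs an optional external script. `cct` is a
 * hypothetical CephContext pointer.
 *
 *   ceph::crush::CrushLocation where(cct);
 *   for (const auto& [type, name] : where.get_location()) {
 *     // e.g. type="host", name="node1"
 *   }
 */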
std::ostream& operator<<(std::ostream& os, const CrushLocation& loc);
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<ceph::crush::CrushLocation> : fmt::ostream_formatter {};
#endif
#endif
| 1,047 | 21.782609 | 90 | h |
null | ceph-main/src/crush/CrushTreeDumper.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2015 Mirantis Inc
*
* Author: Mykola Golub <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CRUSH_TREE_DUMPER_H
#define CRUSH_TREE_DUMPER_H
#include "CrushWrapper.h"
#include "include/stringify.h"
/**
* CrushTreeDumper:
* A helper class and functions to dump a crush tree.
*
* Example:
*
* class SimpleDumper : public CrushTreeDumper::Dumper<ostream> {
* public:
* SimpleDumper(const CrushWrapper *crush) :
* CrushTreeDumper::Dumper<ostream>(crush) {}
* protected:
* virtual void dump_item(const CrushTreeDumper::Item &qi, ostream *out) {
* *out << qi.id;
* for (int k = 0; k < qi.depth; k++)
* *out << "-";
* if (qi.is_bucket())
 *       *out << crush->get_item_name(qi.id);
* else
* *out << "osd." << qi.id;
* *out << "\n";
* }
* };
*
* SimpleDumper(crush).dump(out);
*
*/
namespace CrushTreeDumper {
struct Item {
int id;
int parent;
int depth;
float weight;
std::list<int> children;
Item() : id(0), parent(0), depth(0), weight(0) {}
Item(int i, int p, int d, float w) : id(i), parent(p), depth(d), weight(w) {}
bool is_bucket() const { return id < 0; }
};
template <typename F>
class Dumper : public std::list<Item> {
public:
explicit Dumper(const CrushWrapper *crush_,
const name_map_t& weight_set_names_)
: crush(crush_), weight_set_names(weight_set_names_) {
crush->find_nonshadow_roots(&roots);
root = roots.begin();
}
explicit Dumper(const CrushWrapper *crush_,
const name_map_t& weight_set_names_,
bool show_shadow)
: crush(crush_), weight_set_names(weight_set_names_) {
if (show_shadow) {
crush->find_roots(&roots);
} else {
crush->find_nonshadow_roots(&roots);
}
root = roots.begin();
}
virtual ~Dumper() {}
virtual void reset() {
root = roots.begin();
touched.clear();
clear();
}
virtual bool should_dump_leaf(int i) const {
return true;
}
virtual bool should_dump_empty_bucket() const {
return true;
}
bool should_dump(int id) {
if (id >= 0)
return should_dump_leaf(id);
if (should_dump_empty_bucket())
return true;
int s = crush->get_bucket_size(id);
for (int k = s - 1; k >= 0; k--) {
int c = crush->get_bucket_item(id, k);
if (should_dump(c))
return true;
}
return false;
}
bool next(Item &qi) {
if (empty()) {
while (root != roots.end() && !should_dump(*root))
++root;
if (root == roots.end())
return false;
push_back(Item(*root, 0, 0, crush->get_bucket_weightf(*root)));
++root;
}
qi = front();
pop_front();
touched.insert(qi.id);
if (qi.is_bucket()) {
// queue bucket contents, sorted by (class, name)
int s = crush->get_bucket_size(qi.id);
std::map<std::string, std::pair<int,float>> sorted;
for (int k = s - 1; k >= 0; k--) {
int id = crush->get_bucket_item(qi.id, k);
if (should_dump(id)) {
std::string sort_by;
if (id >= 0) {
const char *c = crush->get_item_class(id);
sort_by = c ? c : "";
sort_by += "_";
char nn[80];
snprintf(nn, sizeof(nn), "osd.%08d", id);
sort_by += nn;
} else {
sort_by = "_";
sort_by += crush->get_item_name(id);
}
sorted[sort_by] = std::make_pair(
id, crush->get_bucket_item_weightf(qi.id, k));
}
}
for (auto p = sorted.rbegin(); p != sorted.rend(); ++p) {
qi.children.push_back(p->second.first);
push_front(Item(p->second.first, qi.id, qi.depth + 1,
p->second.second));
}
}
return true;
}
void dump(F *f) {
reset();
Item qi;
while (next(qi))
dump_item(qi, f);
}
bool is_touched(int id) const { return touched.count(id) > 0; }
void set_root(const std::string& bucket) {
roots.clear();
if (crush->name_exists(bucket)) {
int i = crush->get_item_id(bucket);
roots.insert(i);
}
}
protected:
virtual void dump_item(const Item &qi, F *f) = 0;
protected:
const CrushWrapper *crush;
const name_map_t &weight_set_names;
private:
std::set<int> touched;
std::set<int> roots;
std::set<int>::iterator root;
};
inline void dump_item_fields(const CrushWrapper *crush,
const name_map_t& weight_set_names,
const Item &qi, ceph::Formatter *f) {
f->dump_int("id", qi.id);
const char *c = crush->get_item_class(qi.id);
if (c)
f->dump_string("device_class", c);
if (qi.is_bucket()) {
int type = crush->get_bucket_type(qi.id);
f->dump_string("name", crush->get_item_name(qi.id));
f->dump_string("type", crush->get_type_name(type));
f->dump_int("type_id", type);
} else {
f->dump_stream("name") << "osd." << qi.id;
f->dump_string("type", crush->get_type_name(0));
f->dump_int("type_id", 0);
f->dump_float("crush_weight", qi.weight);
f->dump_unsigned("depth", qi.depth);
}
if (qi.parent < 0) {
f->open_object_section("pool_weights");
for (auto& p : crush->choose_args) {
const crush_choose_arg_map& cmap = p.second;
int bidx = -1 - qi.parent;
const crush_bucket *b = crush->get_bucket(qi.parent);
if (b &&
bidx < (int)cmap.size &&
cmap.args[bidx].weight_set &&
cmap.args[bidx].weight_set_positions >= 1) {
int bpos;
for (bpos = 0;
bpos < (int)cmap.args[bidx].weight_set[0].size &&
b->items[bpos] != qi.id;
++bpos) ;
std::string name;
if (p.first == CrushWrapper::DEFAULT_CHOOSE_ARGS) {
name = "(compat)";
} else {
auto q = weight_set_names.find(p.first);
name = q != weight_set_names.end() ? q->second :
stringify(p.first);
}
f->open_array_section(name.c_str());
for (unsigned opos = 0;
opos < cmap.args[bidx].weight_set_positions;
++opos) {
float w = (float)cmap.args[bidx].weight_set[opos].weights[bpos] /
(float)0x10000;
f->dump_float("weight", w);
}
f->close_section();
}
}
f->close_section();
}
}
inline void dump_bucket_children(const CrushWrapper *crush,
const Item &qi, ceph::Formatter *f) {
if (!qi.is_bucket())
return;
f->open_array_section("children");
for (std::list<int>::const_iterator i = qi.children.begin();
i != qi.children.end();
++i) {
f->dump_int("child", *i);
}
f->close_section();
}
class FormattingDumper : public Dumper<ceph::Formatter> {
public:
explicit FormattingDumper(const CrushWrapper *crush,
const name_map_t& weight_set_names)
: Dumper<ceph::Formatter>(crush, weight_set_names) {}
explicit FormattingDumper(const CrushWrapper *crush,
const name_map_t& weight_set_names,
bool show_shadow)
: Dumper<ceph::Formatter>(crush, weight_set_names, show_shadow) {}
protected:
void dump_item(const Item &qi, ceph::Formatter *f) override {
f->open_object_section("item");
dump_item_fields(qi, f);
dump_bucket_children(qi, f);
f->close_section();
}
virtual void dump_item_fields(const Item &qi, ceph::Formatter *f) {
CrushTreeDumper::dump_item_fields(crush, weight_set_names, qi, f);
}
virtual void dump_bucket_children(const Item &qi, ceph::Formatter *f) {
CrushTreeDumper::dump_bucket_children(crush, qi, f);
}
};
}
#endif
| 7,915 | 26.109589 | 81 | h |
null | ceph-main/src/crush/crush.c | #ifdef __KERNEL__
# include <linux/slab.h>
# include <linux/crush/crush.h>
#else
# include "crush_compat.h"
# include "crush.h"
#endif
const char *crush_bucket_alg_name(int alg)
{
switch (alg) {
case CRUSH_BUCKET_UNIFORM: return "uniform";
case CRUSH_BUCKET_LIST: return "list";
case CRUSH_BUCKET_TREE: return "tree";
case CRUSH_BUCKET_STRAW: return "straw";
case CRUSH_BUCKET_STRAW2: return "straw2";
default: return "unknown";
}
}
/**
* crush_get_bucket_item_weight - Get weight of an item in given bucket
* @b: bucket pointer
* @p: item index in bucket
*/
int crush_get_bucket_item_weight(const struct crush_bucket *b, int p)
{
if ((__u32)p >= b->size)
return 0;
switch (b->alg) {
case CRUSH_BUCKET_UNIFORM:
return ((struct crush_bucket_uniform *)b)->item_weight;
case CRUSH_BUCKET_LIST:
return ((struct crush_bucket_list *)b)->item_weights[p];
case CRUSH_BUCKET_TREE:
return ((struct crush_bucket_tree *)b)->node_weights[crush_calc_tree_node(p)];
case CRUSH_BUCKET_STRAW:
return ((struct crush_bucket_straw *)b)->item_weights[p];
case CRUSH_BUCKET_STRAW2:
return ((struct crush_bucket_straw2 *)b)->item_weights[p];
}
return 0;
}
void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b)
{
kfree(b->h.items);
kfree(b);
}
void crush_destroy_bucket_list(struct crush_bucket_list *b)
{
kfree(b->item_weights);
kfree(b->sum_weights);
kfree(b->h.items);
kfree(b);
}
void crush_destroy_bucket_tree(struct crush_bucket_tree *b)
{
kfree(b->h.items);
kfree(b->node_weights);
kfree(b);
}
void crush_destroy_bucket_straw(struct crush_bucket_straw *b)
{
kfree(b->straws);
kfree(b->item_weights);
kfree(b->h.items);
kfree(b);
}
void crush_destroy_bucket_straw2(struct crush_bucket_straw2 *b)
{
kfree(b->item_weights);
kfree(b->h.items);
kfree(b);
}
void crush_destroy_bucket(struct crush_bucket *b)
{
switch (b->alg) {
case CRUSH_BUCKET_UNIFORM:
crush_destroy_bucket_uniform((struct crush_bucket_uniform *)b);
break;
case CRUSH_BUCKET_LIST:
crush_destroy_bucket_list((struct crush_bucket_list *)b);
break;
case CRUSH_BUCKET_TREE:
crush_destroy_bucket_tree((struct crush_bucket_tree *)b);
break;
case CRUSH_BUCKET_STRAW:
crush_destroy_bucket_straw((struct crush_bucket_straw *)b);
break;
case CRUSH_BUCKET_STRAW2:
crush_destroy_bucket_straw2((struct crush_bucket_straw2 *)b);
break;
}
}
/**
* crush_destroy - Destroy a crush_map
* @map: crush_map pointer
*/
void crush_destroy(struct crush_map *map)
{
/* buckets */
if (map->buckets) {
__s32 b;
for (b = 0; b < map->max_buckets; b++) {
if (map->buckets[b] == NULL)
continue;
crush_destroy_bucket(map->buckets[b]);
}
kfree(map->buckets);
}
/* rules */
if (map->rules) {
__u32 b;
for (b = 0; b < map->max_rules; b++)
crush_destroy_rule(map->rules[b]);
kfree(map->rules);
}
#ifndef __KERNEL__
kfree(map->choose_tries);
#endif
kfree(map);
}
void crush_destroy_rule(struct crush_rule *rule)
{
kfree(rule);
}
| 2,978 | 20.586957 | 80 | c |
null | ceph-main/src/crush/crush.h | #ifndef CEPH_CRUSH_CRUSH_H
#define CEPH_CRUSH_CRUSH_H
#ifdef __KERNEL__
# include <linux/types.h>
#else
# include "crush_compat.h"
#endif
/*
* CRUSH is a pseudo-random data distribution algorithm that
* efficiently distributes input values (typically, data objects)
* across a heterogeneous, structured storage cluster.
*
* The algorithm was originally described in detail in this paper
* (although the algorithm has evolved somewhat since then):
*
* http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
*
* LGPL-2.1 or LGPL-3.0
*/
#define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */
#define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */
#define CRUSH_MAX_RULES (1<<8) /* max crush rule id */
#define CRUSH_MAX_DEVICE_WEIGHT (100u * 0x10000u)
#define CRUSH_MAX_BUCKET_WEIGHT (65535u * 0x10000u)
#define CRUSH_ITEM_UNDEF 0x7ffffffe /* undefined result (internal use only) */
/** @ingroup API
* The equivalent of NULL for an item, i.e. the absence of an item.
*/
#define CRUSH_ITEM_NONE 0x7fffffff
/*
* CRUSH uses user-defined "rules" to describe how inputs should be
* mapped to devices. A rule consists of sequence of steps to perform
* to generate the set of output devices.
*/
struct crush_rule_step {
__u32 op;
__s32 arg1;
__s32 arg2;
};
/** @ingroup API
*/
enum crush_opcodes {
/*! do nothing
*/
CRUSH_RULE_NOOP = 0,
CRUSH_RULE_TAKE = 1, /* arg1 = value to start with */
CRUSH_RULE_CHOOSE_FIRSTN = 2, /* arg1 = num items to pick */
/* arg2 = type */
CRUSH_RULE_CHOOSE_INDEP = 3, /* same */
CRUSH_RULE_EMIT = 4, /* no args */
CRUSH_RULE_CHOOSELEAF_FIRSTN = 6,
CRUSH_RULE_CHOOSELEAF_INDEP = 7,
CRUSH_RULE_SET_CHOOSE_TRIES = 8, /* override choose_total_tries */
CRUSH_RULE_SET_CHOOSELEAF_TRIES = 9, /* override chooseleaf_descend_once */
CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES = 10,
CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES = 11,
CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12,
CRUSH_RULE_SET_CHOOSELEAF_STABLE = 13
};
/*
* for specifying choose num (arg1) relative to the max parameter
* passed to do_rule
*/
#define CRUSH_CHOOSE_N 0
#define CRUSH_CHOOSE_N_MINUS(x) (-(x))
struct crush_rule {
__u32 len;
__u8 __unused_was_rule_mask_ruleset;
__u8 type;
__u8 deprecated_min_size;
__u8 deprecated_max_size;
struct crush_rule_step steps[0];
};
#define crush_rule_size(len) (sizeof(struct crush_rule) + \
(len)*sizeof(struct crush_rule_step))
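/*
 * Illustrative sketch (not part of the original header): a rule with
 * `len` steps is allocated as a single block, e.g.
 *
 *   struct crush_rule *rule = malloc(crush_rule_size(len));
 *   rule->len = len;
 *   rule->steps[0].op = CRUSH_RULE_TAKE;
 *   ...
 */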
/*
* A bucket is a named container of other items (either devices or
* other buckets).
*/
/** @ingroup API
*
* Items within a bucket are chosen with crush_do_rule() using one of
* three algorithms representing a tradeoff between performance and
* reorganization efficiency. If you are unsure of which bucket type
* to use, we recommend using ::CRUSH_BUCKET_STRAW2.
*
* The table summarizes how the speed of each option measures up
* against mapping stability when items are added or removed.
*
* Bucket Alg Speed Additions Removals
* ------------------------------------------------
* uniform O(1) poor poor
* list O(n) optimal poor
* straw2 O(n) optimal optimal
*/
enum crush_algorithm {
/*!
* Devices are rarely added individually in a large system.
* Instead, new storage is typically deployed in blocks of identical
* devices, often as an additional shelf in a server rack or perhaps
* an entire cabinet. Devices reaching their end of life are often
* similarly decommissioned as a set (individual failures aside),
* making it natural to treat them as a unit. CRUSH uniform buckets
* are used to represent an identical set of devices in such
* circumstances. The key advantage in doing so is performance
* related: CRUSH can map replicas into uniform buckets in constant
* time. In cases where the uniformity restrictions are not
* appropriate, other bucket types can be used. If the size of a
* uniform bucket changes, there is a complete reshuffling of data
* between devices, much like conventional hash-based distribution
* strategies.
*/
CRUSH_BUCKET_UNIFORM = 1,
/*!
* List buckets structure their contents as a linked list, and
* can contain items with arbitrary weights. To place a
* replica, CRUSH begins at the head of the list with the most
* recently added item and compares its weight to the sum of
* all remaining items' weights. Depending on the value of
* hash( x , r , item), either the current item is chosen with
* the appropriate probability, or the process continues
* recursively down the list. This is a natural and intuitive
* choice for an expanding cluster: either an object is
* relocated to the newest device with some appropriate
* probability, or it remains on the older devices as before.
* The result is optimal data migration when items are added
* to the bucket. Items removed from the middle or tail of the
* list, however, can result in a significant amount of
* unnecessary movement, making list buckets most suitable for
* circumstances in which they never (or very rarely) shrink.
*/
CRUSH_BUCKET_LIST = 2,
/*! @cond INTERNAL */
CRUSH_BUCKET_TREE = 3,
CRUSH_BUCKET_STRAW = 4,
/*! @endcond */
/*!
* List and tree buckets are structured such that a limited
* number of hash values need to be calculated and compared to
* weights in order to select a bucket item. In doing so,
* they divide and conquer in a way that either gives certain
* items precedence (e. g., those at the beginning of a list)
* or obviates the need to consider entire subtrees of items
* at all. That improves the performance of the replica
* placement process, but can also introduce suboptimal
* reorganization behavior when the contents of a bucket
* change due an addition, removal, or re-weighting of an
* item.
*
* The straw2 bucket type allows all items to fairly "compete"
* against each other for replica placement through a process
* analogous to a draw of straws. To place a replica, a straw
* of random length is drawn for each item in the bucket. The
* item with the longest straw wins. The length of each straw
* is initially a value in a fixed range. Each straw length
* is scaled by a factor based on the item's weight so that
* heavily weighted items are more likely to win the draw.
* Although this process is almost twice as slow (on average)
* than a list bucket and even slower than a tree bucket
* (which scales logarithmically), straw2 buckets result in
* optimal data movement between nested items when modified.
*/
CRUSH_BUCKET_STRAW2 = 5,
};
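/*
 * Illustrative note (not part of the original header): conceptually,
 * straw2 draws, for every item i with weight w_i, a value
 *
 *   draw_i = ln(u_i) / w_i,   with u_i uniform in (0, 1]
 *
 * where u_i is derived from a hash of (input x, item id, replica r),
 * and selects the item with the maximum draw. Because an item's draw
 * depends only on its own weight, changing one weight moves data only
 * to or from that item, which is what makes straw2 reorganization
 * optimal for both additions and removals.
 */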
extern const char *crush_bucket_alg_name(int alg);
/*
* although tree was a legacy algorithm, it has been buggy, so
* exclude it.
*/
#define CRUSH_LEGACY_ALLOWED_BUCKET_ALGS ( \
(1 << CRUSH_BUCKET_UNIFORM) | \
(1 << CRUSH_BUCKET_LIST) | \
(1 << CRUSH_BUCKET_STRAW))
/** @ingroup API
*
* A bucket contains __size__ __items__ which are either positive
* numbers or negative numbers that reference other buckets and is
* uniquely identified with __id__ which is a negative number. The
* __weight__ of a bucket is the cumulative weight of all its
* children. A bucket is assigned a ::crush_algorithm that is used by
* crush_do_rule() to draw an item depending on its weight. A bucket
* can be assigned a strictly positive (> 0) __type__ defined by the
* caller. The __type__ can be used by crush_do_rule(), when it is
* given as an argument of a rule step.
*
* A pointer to crush_bucket can safely be cast into the following
* structure, depending on the value of __alg__:
*
* - __alg__ == ::CRUSH_BUCKET_UNIFORM cast to crush_bucket_uniform
* - __alg__ == ::CRUSH_BUCKET_LIST cast to crush_bucket_list
* - __alg__ == ::CRUSH_BUCKET_STRAW2 cast to crush_bucket_straw2
*
* The weight of each item depends on the algorithm and the
* information about it is available in the corresponding structure
* (crush_bucket_uniform, crush_bucket_list or crush_bucket_straw2).
*
* See crush_map for more information on how __id__ is used
* to reference the bucket.
*/
struct crush_bucket {
__s32 id; /*!< bucket identifier, < 0 and unique within a crush_map */
__u16 type; /*!< > 0 bucket type, defined by the caller */
__u8 alg; /*!< the item selection ::crush_algorithm */
/*! @cond INTERNAL */
__u8 hash; /* which hash function to use, CRUSH_HASH_* */
/*! @endcond */
__u32 weight; /*!< 16.16 fixed point cumulated children weight */
__u32 size; /*!< size of the __items__ array */
__s32 *items; /*!< array of children: < 0 are buckets, >= 0 items */
};
/** @ingroup API
*
* Replacement weights for each item in a bucket. The size of the
* array must be exactly the size of the straw2 bucket, just as the
* item_weights array.
*
*/
struct crush_weight_set {
__u32 *weights; /*!< 16.16 fixed point weights in the same order as items */
__u32 size; /*!< size of the __weights__ array */
};
/** @ingroup API
*
* Replacement weights and ids for a given straw2 bucket, for
* placement purposes.
*
* When crush_do_rule() chooses the Nth item from a straw2 bucket, the
* replacement weights found at __weight_set[N]__ are used instead of
* the weights from __item_weights__. If __N__ is greater than
* __weight_set_positions__, the weights found at __weight_set_positions-1__ are
* used instead. For instance if __weight_set__ is:
*
* [ [ 0x10000, 0x20000 ], // position 0
* [ 0x20000, 0x40000 ] ] // position 1
*
* choosing the 0th item will use position 0 weights [ 0x10000, 0x20000 ]
* choosing the 1th item will use position 1 weights [ 0x20000, 0x40000 ]
* choosing the 2th item will use position 1 weights [ 0x20000, 0x40000 ]
* etc.
*
*/
struct crush_choose_arg {
__s32 *ids; /*!< values to use instead of items */
__u32 ids_size; /*!< size of the __ids__ array */
struct crush_weight_set *weight_set; /*!< weight replacements for a given position */
__u32 weight_set_positions; /*!< size of the __weight_set__ array */
};
/** @ingroup API
*
* Replacement weights and ids for each bucket in the crushmap. The
* __size__ of the __args__ array must be exactly the same as the
* __map->max_buckets__.
*
* The __crush_choose_arg__ at index N will be used when choosing
* an item from the bucket __map->buckets[N]__ bucket, provided it
* is a straw2 bucket.
*
*/
struct crush_choose_arg_map {
struct crush_choose_arg *args; /*!< replacement for each bucket in the crushmap */
__u32 size; /*!< size of the __args__ array */
};
/** @ingroup API
* The weight of each item in the bucket when
* __h.alg__ == ::CRUSH_BUCKET_UNIFORM.
*/
struct crush_bucket_uniform {
struct crush_bucket h; /*!< generic bucket information */
__u32 item_weight; /*!< 16.16 fixed point weight for each item */
};
/** @ingroup API
* The weight of each item in the bucket when
* __h.alg__ == ::CRUSH_BUCKET_LIST.
*
* The weight of __h.items[i]__ is __item_weights[i]__ for i in
 * [0,__h.size__[. The __sum_weights[i]__ is the sum of the __item_weights[j]__
* for j in [0,i[.
*
*/
struct crush_bucket_list {
struct crush_bucket h; /*!< generic bucket information */
__u32 *item_weights; /*!< 16.16 fixed point weight for each item */
__u32 *sum_weights; /*!< 16.16 fixed point sum of the weights */
};
struct crush_bucket_tree {
struct crush_bucket h; /* note: h.size is _tree_ size, not number of
actual items */
__u8 num_nodes;
__u32 *node_weights;
};
struct crush_bucket_straw {
struct crush_bucket h;
__u32 *item_weights; /* 16-bit fixed point */
__u32 *straws; /* 16-bit fixed point */
};
/** @ingroup API
* The weight of each item in the bucket when
* __h.alg__ == ::CRUSH_BUCKET_STRAW2.
*
* The weight of __h.items[i]__ is __item_weights[i]__ for i in
 * [0,__h.size__[.
*/
struct crush_bucket_straw2 {
struct crush_bucket h; /*!< generic bucket information */
__u32 *item_weights; /*!< 16.16 fixed point weight for each item */
};
/** @ingroup API
*
 * A crush map defines a hierarchy of crush_bucket that ends with leaves
* (buckets and leaves are called items) and a set of crush_rule to
* map an integer to items with the crush_do_rule() function.
*
*/
struct crush_map {
/*! An array of crush_bucket pointers of size __max_buckets__.
* An element of the array may be NULL if the bucket was removed with
* crush_remove_bucket(). The buckets must be added with crush_add_bucket().
* The bucket found at __buckets[i]__ must have a crush_bucket.id == -1-i.
*/
struct crush_bucket **buckets;
/*! An array of crush_rule pointers of size __max_rules__.
* An element of the array may be NULL if the rule was removed (there is
* no API to do so but there may be one in the future). The rules must be added
* with crush_add_rule().
*/
struct crush_rule **rules;
__s32 max_buckets; /*!< the size of __buckets__ */
__u32 max_rules; /*!< the size of __rules__ */
/*! The value of the highest item stored in the crush_map + 1
*/
__s32 max_devices;
/*! Backward compatibility tunable. It implements a bad solution
* and must always be set to 0 except for backward compatibility
* purposes
*/
__u32 choose_local_tries;
/*! Backward compatibility tunable. It implements a bad solution
* and must always be set to 0 except for backward compatibility
* purposes
*/
__u32 choose_local_fallback_tries;
/*! Tunable. The default value when the CHOOSE_TRIES or
* CHOOSELEAF_TRIES steps are omitted in a rule. See the
* documentation for crush_rule_set_step() for more
* information
*/
__u32 choose_total_tries;
/*! Backward compatibility tunable. It should always be set
	 * to 1 except for backward compatibility. Implemented in 2012,
	 * it was generalized late 2013 and is mostly unused except
	 * in one border case, which is why it must be set to 1.
*
* Attempt chooseleaf inner descent once for firstn mode; on
* reject retry outer descent. Note that this does *not*
* apply to a collision: in that case we will retry as we
* used to.
*/
__u32 chooseleaf_descend_once;
/*! Backward compatibility tunable. It is a fix for bad
* mappings implemented in 2014 at
* https://github.com/ceph/ceph/pull/1185. It should always
* be set to 1 except for backward compatibility.
*
* If non-zero, feed r into chooseleaf, bit-shifted right by
* (r-1) bits. a value of 1 is best for new clusters. for
* legacy clusters that want to limit reshuffling, a value of
* 3 or 4 will make the mappings line up a bit better with
* previous mappings.
*/
__u8 chooseleaf_vary_r;
/*! Backward compatibility tunable. It is an improvement that
* avoids unnecessary mapping changes, implemented at
* https://github.com/ceph/ceph/pull/6572 and explained in
* this post: "chooseleaf may cause some unnecessary pg
* migrations" in October 2015
* https://www.mail-archive.com/[email protected]/msg26075.html
* It should always be set to 1 except for backward compatibility.
*/
__u8 chooseleaf_stable;
/*! @cond INTERNAL */
/* This value is calculated after decode or construction by
the builder. It is exposed here (rather than having a
'build CRUSH working space' function) so that callers can
reserve a static buffer, allocate space on the stack, or
otherwise avoid calling into the heap allocator if they
want to. The size of the working space depends on the map,
while the size of the scratch vector passed to the mapper
depends on the size of the desired result set.
Nothing stops the caller from allocating both in one swell
	   foop and passing in two pointers, though. */
size_t working_size;
#ifndef __KERNEL__
/*! @endcond */
/*! Backward compatibility tunable. It is a fix for the straw
* scaler values for the straw algorithm which is deprecated
* (straw2 replaces it) implemented at
* https://github.com/ceph/ceph/pull/3057. It should always
* be set to 1 except for backward compatibility.
*
*/
__u8 straw_calc_version;
/*! @cond INTERNAL */
/*
* allowed bucket algs is a bitmask, here the bit positions
* are CRUSH_BUCKET_*. note that these are *bits* and
* CRUSH_BUCKET_* values are not, so we need to or together (1
* << CRUSH_BUCKET_WHATEVER). The 0th bit is not used to
* minimize confusion (bucket type values start at 1).
*/
__u32 allowed_bucket_algs;
__u32 *choose_tries;
#endif
/*! @endcond */
};
/* crush.c */
/** @ingroup API
*
* Return the 16.16 fixed point weight of the item at __pos__ (zero
* based index) within the bucket __b__. If __pos__ is negative or
* greater or equal to the number of items in the bucket, return 0.
*
* @param b the bucket containing items
* @param pos the zero based index of the item
*
* @returns the 16.16 fixed point item weight
*/
extern int crush_get_bucket_item_weight(const struct crush_bucket *b, int pos);
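/*
 * Illustrative example (not part of the original header): weights are
 * 16.16 fixed point and a bucket with id `id` lives at buckets[-1-id],
 * so a caller converting an item weight to a float might do:
 *
 *   const struct crush_bucket *b = map->buckets[-1 - bucket_id];
 *   double w = crush_get_bucket_item_weight(b, pos) / (double)0x10000;
 */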
extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b);
extern void crush_destroy_bucket_straw2(struct crush_bucket_straw2 *b);
/** @ingroup API
*
* Deallocate a bucket created via crush_add_bucket().
*
* @param b the bucket to deallocate
*/
extern void crush_destroy_bucket(struct crush_bucket *b);
/** @ingroup API
*
* Deallocate a rule created via crush_add_rule().
*
* @param r the rule to deallocate
*/
extern void crush_destroy_rule(struct crush_rule *r);
/** @ingroup API
*
* Deallocate the __map__, previously allocated with crush_create.
*
* @param map the crush map
*/
extern void crush_destroy(struct crush_map *map);
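/* Descriptive note (added for clarity): this helper maps the zero based
 * position of an item in a tree bucket to its node index inside the
 * bucket's internal binary tree, i.e. crush_calc_tree_node(i) == 2*i + 1,
 * so items occupy the odd numbered nodes. */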
static inline int crush_calc_tree_node(int i)
{
return ((i+1) << 1)-1;
}
static inline const char *crush_alg_name(int alg)
{
switch (alg) {
case CRUSH_BUCKET_UNIFORM:
return "uniform";
case CRUSH_BUCKET_LIST:
return "list";
case CRUSH_BUCKET_TREE:
return "tree";
case CRUSH_BUCKET_STRAW:
return "straw";
case CRUSH_BUCKET_STRAW2:
return "straw2";
default:
return "unknown";
}
}
/* ---------------------------------------------------------------------
Private
--------------------------------------------------------------------- */
/* These data structures are private to the CRUSH implementation. They
are exposed in this header file because the builder needs their
definitions to calculate the total working size.
Moving this out of the crush map allows us to treat the CRUSH map as
immutable within the mapper and removes the requirement for a CRUSH
map lock. */
struct crush_work_bucket {
__u32 perm_x; /* @x for which *perm is defined */
__u32 perm_n; /* num elements of *perm that are permuted/defined */
__u32 *perm; /* Permutation of the bucket's items */
} __attribute__ ((packed));
struct crush_work {
struct crush_work_bucket **work; /* Per-bucket working store */
};
#endif
| 20,023 | 36.081481 | 88 | h |
null | ceph-main/src/crush/crush_compat.h | #ifndef CEPH_CRUSH_COMPAT_H
#define CEPH_CRUSH_COMPAT_H
#include "include/int_types.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* asm-generic/bug.h */
#define BUG_ON(x) assert(!(x))
/* linux/kernel.h */
#define U8_MAX ((__u8)~0U)
#define S8_MAX ((__s8)(U8_MAX>>1))
#define S8_MIN ((__s8)(-S8_MAX - 1))
#define U16_MAX ((__u16)~0U)
#define S16_MAX ((__s16)(U16_MAX>>1))
#define S16_MIN ((__s16)(-S16_MAX - 1))
#define U32_MAX ((__u32)~0U)
#define S32_MAX ((__s32)(U32_MAX>>1))
#define S32_MIN ((__s32)(-S32_MAX - 1))
#define U64_MAX ((__u64)~0ULL)
#define S64_MAX ((__s64)(U64_MAX>>1))
#define S64_MIN ((__s64)(-S64_MAX - 1))
/* linux/math64.h */
#define div64_s64(dividend, divisor) ((dividend) / (divisor))
/* linux/slab.h */
#define kmalloc(size, flags) malloc(size)
#define kfree(x) do { if (x) free(x); } while (0)
#endif /* CEPH_CRUSH_COMPAT_H */
| 914 | 21.875 | 61 | h |
null | ceph-main/src/crush/crush_ln_table.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Intel Corporation All Rights Reserved
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CRUSH_LN_H
#define CEPH_CRUSH_LN_H
#ifdef __KERNEL__
# include <linux/types.h>
#else
# include "crush_compat.h"
#endif
/*
* RH_LH_tbl[2*k] = 2^48/(1.0+k/128.0)
* RH_LH_tbl[2*k+1] = 2^48*log2(1.0+k/128.0)
*/
static __s64 __RH_LH_tbl[128*2+2] = {
0x0001000000000000ll, 0x0000000000000000ll, 0x0000fe03f80fe040ll, 0x000002dfca16dde1ll,
0x0000fc0fc0fc0fc1ll, 0x000005b9e5a170b4ll, 0x0000fa232cf25214ll, 0x0000088e68ea899all,
0x0000f83e0f83e0f9ll, 0x00000b5d69bac77ell, 0x0000f6603d980f67ll, 0x00000e26fd5c8555ll,
0x0000f4898d5f85bcll, 0x000010eb389fa29fll, 0x0000f2b9d6480f2cll, 0x000013aa2fdd27f1ll,
0x0000f0f0f0f0f0f1ll, 0x00001663f6fac913ll, 0x0000ef2eb71fc435ll, 0x00001918a16e4633ll,
0x0000ed7303b5cc0fll, 0x00001bc84240adabll, 0x0000ebbdb2a5c162ll, 0x00001e72ec117fa5ll,
0x0000ea0ea0ea0ea1ll, 0x00002118b119b4f3ll, 0x0000e865ac7b7604ll, 0x000023b9a32eaa56ll,
0x0000e6c2b4481cd9ll, 0x00002655d3c4f15cll, 0x0000e525982af70dll, 0x000028ed53f307eell,
0x0000e38e38e38e39ll, 0x00002b803473f7adll, 0x0000e1fc780e1fc8ll, 0x00002e0e85a9de04ll,
0x0000e070381c0e08ll, 0x0000309857a05e07ll, 0x0000dee95c4ca038ll, 0x0000331dba0efce1ll,
0x0000dd67c8a60dd7ll, 0x0000359ebc5b69d9ll, 0x0000dbeb61eed19dll, 0x0000381b6d9bb29bll,
0x0000da740da740dbll, 0x00003a93dc9864b2ll, 0x0000d901b2036407ll, 0x00003d0817ce9cd4ll,
0x0000d79435e50d7all, 0x00003f782d7204d0ll, 0x0000d62b80d62b81ll, 0x000041e42b6ec0c0ll,
0x0000d4c77b03531ell, 0x0000444c1f6b4c2dll, 0x0000d3680d3680d4ll, 0x000046b016ca47c1ll,
0x0000d20d20d20d21ll, 0x000049101eac381cll, 0x0000d0b69fcbd259ll, 0x00004b6c43f1366all,
0x0000cf6474a8819fll, 0x00004dc4933a9337ll, 0x0000ce168a772509ll, 0x0000501918ec6c11ll,
0x0000cccccccccccdll, 0x00005269e12f346ell, 0x0000cb8727c065c4ll, 0x000054b6f7f1325all,
0x0000ca4587e6b750ll, 0x0000570068e7ef5all, 0x0000c907da4e8712ll, 0x000059463f919deell,
0x0000c7ce0c7ce0c8ll, 0x00005b8887367433ll, 0x0000c6980c6980c7ll, 0x00005dc74ae9fbecll,
0x0000c565c87b5f9ell, 0x00006002958c5871ll, 0x0000c4372f855d83ll, 0x0000623a71cb82c8ll,
0x0000c30c30c30c31ll, 0x0000646eea247c5cll, 0x0000c1e4bbd595f7ll, 0x000066a008e4788cll,
0x0000c0c0c0c0c0c1ll, 0x000068cdd829fd81ll, 0x0000bfa02fe80bfbll, 0x00006af861e5fc7dll,
0x0000be82fa0be830ll, 0x00006d1fafdce20all, 0x0000bd6910470767ll, 0x00006f43cba79e40ll,
0x0000bc52640bc527ll, 0x00007164beb4a56dll, 0x0000bb3ee721a54ell, 0x000073829248e961ll,
0x0000ba2e8ba2e8bbll, 0x0000759d4f80cba8ll, 0x0000b92143fa36f6ll, 0x000077b4ff5108d9ll,
0x0000b81702e05c0cll, 0x000079c9aa879d53ll, 0x0000b70fbb5a19bfll, 0x00007bdb59cca388ll,
0x0000b60b60b60b61ll, 0x00007dea15a32c1bll, 0x0000b509e68a9b95ll, 0x00007ff5e66a0ffell,
0x0000b40b40b40b41ll, 0x000081fed45cbccbll, 0x0000b30f63528918ll, 0x00008404e793fb81ll,
0x0000b21642c8590cll, 0x000086082806b1d5ll, 0x0000b11fd3b80b12ll, 0x000088089d8a9e47ll,
0x0000b02c0b02c0b1ll, 0x00008a064fd50f2all, 0x0000af3addc680b0ll, 0x00008c01467b94bbll,
0x0000ae4c415c9883ll, 0x00008df988f4ae80ll, 0x0000ad602b580ad7ll, 0x00008fef1e987409ll,
0x0000ac7691840ac8ll, 0x000091e20ea1393ell, 0x0000ab8f69e2835all, 0x000093d2602c2e5fll,
0x0000aaaaaaaaaaabll, 0x000095c01a39fbd6ll, 0x0000a9c84a47a080ll, 0x000097ab43af59f9ll,
0x0000a8e83f5717c1ll, 0x00009993e355a4e5ll, 0x0000a80a80a80a81ll, 0x00009b79ffdb6c8bll,
0x0000a72f0539782all, 0x00009d5d9fd5010bll, 0x0000a655c4392d7cll, 0x00009f3ec9bcfb80ll,
0x0000a57eb50295fbll, 0x0000a11d83f4c355ll, 0x0000a4a9cf1d9684ll, 0x0000a2f9d4c51039ll,
0x0000a3d70a3d70a4ll, 0x0000a4d3c25e68dcll, 0x0000a3065e3fae7dll, 0x0000a6ab52d99e76ll,
0x0000a237c32b16d0ll, 0x0000a8808c384547ll, 0x0000a16b312ea8fdll, 0x0000aa5374652a1cll,
0x0000a0a0a0a0a0a1ll, 0x0000ac241134c4e9ll, 0x00009fd809fd80a0ll, 0x0000adf26865a8a1ll,
0x00009f1165e72549ll, 0x0000afbe7fa0f04dll, 0x00009e4cad23dd60ll, 0x0000b1885c7aa982ll,
0x00009d89d89d89d9ll, 0x0000b35004723c46ll, 0x00009cc8e160c3fcll, 0x0000b5157cf2d078ll,
0x00009c09c09c09c1ll, 0x0000b6d8cb53b0call, 0x00009b4c6f9ef03bll, 0x0000b899f4d8ab63ll,
0x00009a90e7d95bc7ll, 0x0000ba58feb2703all, 0x000099d722dabde6ll, 0x0000bc15edfeed32ll,
0x0000991f1a515886ll, 0x0000bdd0c7c9a817ll, 0x00009868c809868dll, 0x0000bf89910c1678ll,
0x000097b425ed097cll, 0x0000c1404eadf383ll, 0x000097012e025c05ll, 0x0000c2f5058593d9ll,
0x0000964fda6c0965ll, 0x0000c4a7ba58377cll, 0x000095a02568095bll, 0x0000c65871da59ddll,
0x000094f2094f2095ll, 0x0000c80730b00016ll, 0x0000944580944581ll, 0x0000c9b3fb6d0559ll,
0x0000939a85c4093all, 0x0000cb5ed69565afll, 0x000092f113840498ll, 0x0000cd07c69d8702ll,
0x0000924924924925ll, 0x0000ceaecfea8085ll, 0x000091a2b3c4d5e7ll, 0x0000d053f6d26089ll,
0x000090fdbc090fdcll, 0x0000d1f73f9c70c0ll, 0x0000905a38633e07ll, 0x0000d398ae817906ll,
0x00008fb823ee08fcll, 0x0000d53847ac00a6ll, 0x00008f1779d9fdc4ll, 0x0000d6d60f388e41ll,
0x00008e78356d1409ll, 0x0000d8720935e643ll, 0x00008dda5202376all, 0x0000da0c39a54804ll,
0x00008d3dcb08d3ddll, 0x0000dba4a47aa996ll, 0x00008ca29c046515ll, 0x0000dd3b4d9cf24bll,
0x00008c08c08c08c1ll, 0x0000ded038e633f3ll, 0x00008b70344a139cll, 0x0000e0636a23e2eell,
0x00008ad8f2fba939ll, 0x0000e1f4e5170d02ll, 0x00008a42f870566all, 0x0000e384ad748f0ell,
0x000089ae4089ae41ll, 0x0000e512c6e54998ll, 0x0000891ac73ae982ll, 0x0000e69f35065448ll,
0x0000888888888889ll, 0x0000e829fb693044ll, 0x000087f78087f781ll, 0x0000e9b31d93f98ell,
0x00008767ab5f34e5ll, 0x0000eb3a9f019750ll, 0x000086d905447a35ll, 0x0000ecc08321eb30ll,
0x0000864b8a7de6d2ll, 0x0000ee44cd59ffabll, 0x000085bf37612cefll, 0x0000efc781043579ll,
0x0000853408534086ll, 0x0000f148a170700all, 0x000084a9f9c8084bll, 0x0000f2c831e44116ll,
0x0000842108421085ll, 0x0000f446359b1353ll, 0x0000839930523fbfll, 0x0000f5c2afc65447ll,
0x000083126e978d50ll, 0x0000f73da38d9d4all, 0x0000828cbfbeb9a1ll, 0x0000f8b7140edbb1ll,
0x0000820820820821ll, 0x0000fa2f045e7832ll, 0x000081848da8faf1ll, 0x0000fba577877d7dll,
0x0000810204081021ll, 0x0000fd1a708bbe11ll, 0x0000808080808081ll, 0x0000fe8df263f957ll,
0x0000800000000000ll, 0x0000ffff00000000ll,
};
/*
* LL_tbl[k] = 2^48*log2(1.0+k/2^15)
*/
static __s64 __LL_tbl[256] = {
0x0000000000000000ull, 0x00000002e2a60a00ull, 0x000000070cb64ec5ull, 0x00000009ef50ce67ull,
0x0000000cd1e588fdull, 0x0000000fb4747e9cull, 0x0000001296fdaf5eull, 0x0000001579811b58ull,
0x000000185bfec2a1ull, 0x0000001b3e76a552ull, 0x0000001e20e8c380ull, 0x0000002103551d43ull,
0x00000023e5bbb2b2ull, 0x00000026c81c83e4ull, 0x00000029aa7790f0ull, 0x0000002c8cccd9edull,
0x0000002f6f1c5ef2ull, 0x0000003251662017ull, 0x0000003533aa1d71ull, 0x0000003815e8571aull,
0x0000003af820cd26ull, 0x0000003dda537faeull, 0x00000040bc806ec8ull, 0x000000439ea79a8cull,
0x0000004680c90310ull, 0x0000004962e4a86cull, 0x0000004c44fa8ab6ull, 0x0000004f270aaa06ull,
0x0000005209150672ull, 0x00000054eb19a013ull, 0x00000057cd1876fdull, 0x0000005aaf118b4aull,
0x0000005d9104dd0full, 0x0000006072f26c64ull, 0x0000006354da3960ull, 0x0000006636bc441aull,
0x0000006918988ca8ull, 0x0000006bfa6f1322ull, 0x0000006edc3fd79full, 0x00000071be0ada35ull,
0x000000749fd01afdull, 0x00000077818f9a0cull, 0x0000007a6349577aull, 0x0000007d44fd535eull,
0x0000008026ab8dceull, 0x00000083085406e3ull, 0x00000085e9f6beb2ull, 0x00000088cb93b552ull,
0x0000008bad2aeadcull, 0x0000008e8ebc5f65ull, 0x0000009170481305ull, 0x0000009451ce05d3ull,
0x00000097334e37e5ull, 0x0000009a14c8a953ull, 0x0000009cf63d5a33ull, 0x0000009fd7ac4a9dull,
0x000000a2b07f3458ull, 0x000000a59a78ea6aull, 0x000000a87bd699fbull, 0x000000ab5d2e8970ull,
0x000000ae3e80b8e3ull, 0x000000b11fcd2869ull, 0x000000b40113d818ull, 0x000000b6e254c80aull,
0x000000b9c38ff853ull, 0x000000bca4c5690cull, 0x000000bf85f51a4aull, 0x000000c2671f0c26ull,
0x000000c548433eb6ull, 0x000000c82961b211ull, 0x000000cb0a7a664dull, 0x000000cdeb8d5b82ull,
0x000000d0cc9a91c8ull, 0x000000d3ada20933ull, 0x000000d68ea3c1ddull, 0x000000d96f9fbbdbull,
0x000000dc5095f744ull, 0x000000df31867430ull, 0x000000e2127132b5ull, 0x000000e4f35632eaull,
0x000000e7d43574e6ull, 0x000000eab50ef8c1ull, 0x000000ed95e2be90ull, 0x000000f076b0c66cull,
0x000000f35779106aull, 0x000000f6383b9ca2ull, 0x000000f918f86b2aull, 0x000000fbf9af7c1aull,
0x000000feda60cf88ull, 0x00000101bb0c658cull, 0x000001049bb23e3cull, 0x000001077c5259afull,
0x0000010a5cecb7fcull, 0x0000010d3d81593aull, 0x000001101e103d7full, 0x00000112fe9964e4ull,
0x00000115df1ccf7eull, 0x00000118bf9a7d64ull, 0x0000011ba0126eadull, 0x0000011e8084a371ull,
0x0000012160f11bc6ull, 0x000001244157d7c3ull, 0x0000012721b8d77full, 0x0000012a02141b10ull,
0x0000012ce269a28eull, 0x0000012fc2b96e0full, 0x00000132a3037daaull, 0x000001358347d177ull,
0x000001386386698cull, 0x0000013b43bf45ffull, 0x0000013e23f266e9ull, 0x00000141041fcc5eull,
0x00000143e4477678ull, 0x00000146c469654bull, 0x00000149a48598f0ull, 0x0000014c849c117cull,
0x0000014f64accf08ull, 0x0000015244b7d1a9ull, 0x0000015524bd1976ull, 0x0000015804bca687ull,
0x0000015ae4b678f2ull, 0x0000015dc4aa90ceull, 0x00000160a498ee31ull, 0x0000016384819134ull,
0x00000166646479ecull, 0x000001694441a870ull, 0x0000016c24191cd7ull, 0x0000016df6ca19bdull,
0x00000171e3b6d7aaull, 0x00000174c37d1e44ull, 0x00000177a33dab1cull, 0x0000017a82f87e49ull,
0x0000017d62ad97e2ull, 0x00000180425cf7feull, 0x00000182b07f3458ull, 0x0000018601aa8c19ull,
0x00000188e148c046ull, 0x0000018bc0e13b52ull, 0x0000018ea073fd52ull, 0x000001918001065dull,
0x000001945f88568bull, 0x000001973f09edf2ull, 0x0000019a1e85ccaaull, 0x0000019cfdfbf2c8ull,
0x0000019fdd6c6063ull, 0x000001a2bcd71593ull, 0x000001a59c3c126eull, 0x000001a87b9b570bull,
0x000001ab5af4e380ull, 0x000001ae3a48b7e5ull, 0x000001b11996d450ull, 0x000001b3f8df38d9ull,
0x000001b6d821e595ull, 0x000001b9b75eda9bull, 0x000001bc96961803ull, 0x000001bf75c79de3ull,
0x000001c254f36c51ull, 0x000001c534198365ull, 0x000001c81339e336ull, 0x000001caf2548bd9ull,
0x000001cdd1697d67ull, 0x000001d0b078b7f5ull, 0x000001d38f823b9aull, 0x000001d66e86086dull,
0x000001d94d841e86ull, 0x000001dc2c7c7df9ull, 0x000001df0b6f26dfull, 0x000001e1ea5c194eull,
0x000001e4c943555dull, 0x000001e7a824db23ull, 0x000001ea8700aab5ull, 0x000001ed65d6c42bull,
0x000001f044a7279dull, 0x000001f32371d51full, 0x000001f60236cccaull, 0x000001f8e0f60eb3ull,
0x000001fbbfaf9af3ull, 0x000001fe9e63719eull, 0x000002017d1192ccull, 0x000002045bb9fe94ull,
0x000002073a5cb50dull, 0x00000209c06e6212ull, 0x0000020cf791026aull, 0x0000020fd622997cull,
0x00000212b07f3458ull, 0x000002159334a8d8ull, 0x0000021871b52150ull, 0x0000021b502fe517ull,
0x0000021d6a73a78full, 0x000002210d144eeeull, 0x00000223eb7df52cull, 0x00000226c9e1e713ull,
0x00000229a84024bbull, 0x0000022c23679b4eull, 0x0000022f64eb83a8ull, 0x000002324338a51bull,
0x00000235218012a9ull, 0x00000237ffc1cc69ull, 0x0000023a2c3b0ea4ull, 0x0000023d13ee805bull,
0x0000024035e9221full, 0x00000243788faf25ull, 0x0000024656b4e735ull, 0x00000247ed646bfeull,
0x0000024c12ee3d98ull, 0x0000024ef1025c1aull, 0x00000251cf10c799ull, 0x0000025492644d65ull,
0x000002578b1c85eeull, 0x0000025a6919d8f0ull, 0x0000025d13ee805bull, 0x0000026025036716ull,
0x0000026296453882ull, 0x00000265e0d62b53ull, 0x00000268beb701f3ull, 0x0000026b9c92265eull,
0x0000026d32f798a9ull, 0x00000271583758ebull, 0x000002743601673bull, 0x0000027713c5c3b0ull,
0x00000279f1846e5full, 0x0000027ccf3d6761ull, 0x0000027e6580aecbull, 0x000002828a9e44b3ull,
0x0000028568462932ull, 0x00000287bdbf5255ull, 0x0000028b2384de4aull, 0x0000028d13ee805bull,
0x0000029035e9221full, 0x0000029296453882ull, 0x0000029699bdfb61ull, 0x0000029902a37aabull,
0x0000029c54b864c9ull, 0x0000029deabd1083ull, 0x000002a20f9c0bb5ull, 0x000002a4c7605d61ull,
0x000002a7bdbf5255ull, 0x000002a96056dafcull, 0x000002ac3daf14efull, 0x000002af1b019ecaull,
0x000002b296453882ull, 0x000002b5d022d80full, 0x000002b8fa471cb3ull, 0x000002ba9012e713ull,
0x000002bd6d4901ccull, 0x000002c04a796cf6ull, 0x000002c327a428a6ull, 0x000002c61a5e8f4cull,
0x000002c8e1e891f6ull, 0x000002cbbf023fc2ull, 0x000002ce9c163e6eull, 0x000002d179248e13ull,
0x000002d4562d2ec6ull, 0x000002d73330209dull, 0x000002da102d63b0ull, 0x000002dced24f814ull,
};
#endif
| 12,506 | 74.8 | 93 | h |
null | ceph-main/src/crush/hash.c | #ifdef __KERNEL__
# include <linux/crush/hash.h>
#else
# include "hash.h"
#endif
/*
* Robert Jenkins' function for mixing 32-bit values
* http://burtleburtle.net/bob/hash/evahash.html
* a, b = random bits, c = input and output
*/
#define crush_hashmix(a, b, c) do { \
a = a-b; a = a-c; a = a^(c>>13); \
b = b-c; b = b-a; b = b^(a<<8); \
c = c-a; c = c-b; c = c^(b>>13); \
a = a-b; a = a-c; a = a^(c>>12); \
b = b-c; b = b-a; b = b^(a<<16); \
c = c-a; c = c-b; c = c^(b>>5); \
a = a-b; a = a-c; a = a^(c>>3); \
b = b-c; b = b-a; b = b^(a<<10); \
c = c-a; c = c-b; c = c^(b>>15); \
} while (0)
#define crush_hash_seed 1315423911
static __u32 crush_hash32_rjenkins1(__u32 a)
{
__u32 hash = crush_hash_seed ^ a;
__u32 b = a;
__u32 x = 231232;
__u32 y = 1232;
crush_hashmix(b, x, hash);
crush_hashmix(y, a, hash);
return hash;
}
static __u32 crush_hash32_rjenkins1_2(__u32 a, __u32 b)
{
__u32 hash = crush_hash_seed ^ a ^ b;
__u32 x = 231232;
__u32 y = 1232;
crush_hashmix(a, b, hash);
crush_hashmix(x, a, hash);
crush_hashmix(b, y, hash);
return hash;
}
static __u32 crush_hash32_rjenkins1_3(__u32 a, __u32 b, __u32 c)
{
__u32 hash = crush_hash_seed ^ a ^ b ^ c;
__u32 x = 231232;
__u32 y = 1232;
crush_hashmix(a, b, hash);
crush_hashmix(c, x, hash);
crush_hashmix(y, a, hash);
crush_hashmix(b, x, hash);
crush_hashmix(y, c, hash);
return hash;
}
static __u32 crush_hash32_rjenkins1_4(__u32 a, __u32 b, __u32 c, __u32 d)
{
__u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d;
__u32 x = 231232;
__u32 y = 1232;
crush_hashmix(a, b, hash);
crush_hashmix(c, d, hash);
crush_hashmix(a, x, hash);
crush_hashmix(y, b, hash);
crush_hashmix(c, x, hash);
crush_hashmix(y, d, hash);
return hash;
}
static __u32 crush_hash32_rjenkins1_5(__u32 a, __u32 b, __u32 c, __u32 d,
__u32 e)
{
__u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d ^ e;
__u32 x = 231232;
__u32 y = 1232;
crush_hashmix(a, b, hash);
crush_hashmix(c, d, hash);
crush_hashmix(e, x, hash);
crush_hashmix(y, a, hash);
crush_hashmix(b, x, hash);
crush_hashmix(y, c, hash);
crush_hashmix(d, x, hash);
crush_hashmix(y, e, hash);
return hash;
}
__u32 crush_hash32(int type, __u32 a)
{
switch (type) {
case CRUSH_HASH_RJENKINS1:
return crush_hash32_rjenkins1(a);
default:
return 0;
}
}
__u32 crush_hash32_2(int type, __u32 a, __u32 b)
{
switch (type) {
case CRUSH_HASH_RJENKINS1:
return crush_hash32_rjenkins1_2(a, b);
default:
return 0;
}
}
__u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c)
{
switch (type) {
case CRUSH_HASH_RJENKINS1:
return crush_hash32_rjenkins1_3(a, b, c);
default:
return 0;
}
}
__u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d)
{
switch (type) {
case CRUSH_HASH_RJENKINS1:
return crush_hash32_rjenkins1_4(a, b, c, d);
default:
return 0;
}
}
__u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d, __u32 e)
{
switch (type) {
case CRUSH_HASH_RJENKINS1:
return crush_hash32_rjenkins1_5(a, b, c, d, e);
default:
return 0;
}
}
const char *crush_hash_name(int type)
{
switch (type) {
case CRUSH_HASH_RJENKINS1:
return "rjenkins1";
default:
return "unknown";
}
}
| 3,206 | 20.098684 | 75 | c |
null | ceph-main/src/crush/hash.h | #ifndef CEPH_CRUSH_HASH_H
#define CEPH_CRUSH_HASH_H
#ifdef __KERNEL__
# include <linux/types.h>
#else
# include "crush_compat.h"
#endif
#define CRUSH_HASH_RJENKINS1 0
#define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1
extern const char *crush_hash_name(int type);
extern __u32 crush_hash32(int type, __u32 a);
extern __u32 crush_hash32_2(int type, __u32 a, __u32 b);
extern __u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c);
extern __u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d);
extern __u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d,
__u32 e);
#endif
| 611 | 24.5 | 74 | h |
null | ceph-main/src/crush/mapper.h | #ifndef CEPH_CRUSH_MAPPER_H
#define CEPH_CRUSH_MAPPER_H
/*
* CRUSH functions for finding rules and then mapping an input to an
* output set.
*
* LGPL-2.1 or LGPL-3.0
*/
#include "crush.h"
/** @ingroup API
*
* Map __x__ to __result_max__ items and store them in the __result__
* array. The mapping is done by following each step of the rule
* __ruleno__. See crush_make_rule(), crush_rule_set_step() and
* crush_add_rule() for more information on how the rules are created,
* populated and added to the crush __map__.
*
* The return value is the number of items in the __result__
* array. If the caller asked for __result_max__ items and the return
* value is X where X < __result_max__, the content of __result[0,X[__
* is defined but the content of __result[X,result_max[__ is
* undefined. For example:
*
* crush_do_rule(map, ruleno=1, x=1, result, result_max=3,...) == 1
* result[0] is set
* result[1] is undefined
* result[2] is undefined
*
* An entry in the __result__ array is either an item in the crush
* __map__ or ::CRUSH_ITEM_NONE if no item was found. For example:
*
* crush_do_rule(map, ruleno=1, x=1, result, result_max=4,...) == 2
* result[0] is CRUSH_ITEM_NONE
* result[1] is item number 5
* result[2] is undefined
* result[3] is undefined
*
* The __weight__ array contains the probabilities that a leaf is
* ignored even if it is selected. It is a 16.16 fixed point
* number in the range [0x00000,0x10000]. The lower the value, the
* more often the leaf is ignored. For instance:
*
* - weight[leaf] == 0x00000 == 0.0 always ignore
* - weight[leaf] == 0x10000 == 1.0 never ignore
* - weight[leaf] == 0x08000 == 0.5 ignore 50% of the time
* - weight[leaf] == 0x04000 == 0.25 ignore 75% of the time
* - etc.
*
* During mapping, each leaf is checked against the __weight__ array,
* using the leaf as an index. If there is no entry in __weight__ for
* the leaf, it is ignored. If there is an entry, the leaf will be
* ignored some of the time, depending on the probability.
*
* The __cwin__ argument must be set as follows:
*
* char __cwin__[crush_work_size(__map__, __result_max__)];
* crush_init_workspace(__map__, __cwin__);
*
* @param map the crush_map
* @param ruleno a positive integer < __CRUSH_MAX_RULES__
* @param x the value to map to __result_max__ items
* @param result an array of items of size __result_max__
* @param result_max the size of the __result__ array
* @param weights an array of weights of size __weight_max__
* @param weight_max the size of the __weights__ array
* @param cwin must be a char array initialized by crush_init_workspace
* @param choose_args weights and ids for each known bucket
*
* @return 0 on error or the size of __result__ on success
*/
extern int crush_do_rule(const struct crush_map *map,
int ruleno,
int x, int *result, int result_max,
const __u32 *weights, int weight_max,
void *cwin, const struct crush_choose_arg *choose_args);
/* Returns the exact amount of workspace that will need to be used
for a given combination of crush_map and result_max. The caller can
then allocate this much on its own, either on the stack, in a
per-thread long-lived buffer, or however it likes. */
static inline size_t crush_work_size(const struct crush_map *map,
int result_max) {
return map->working_size + result_max * 3 * sizeof(__u32);
}
extern void crush_init_workspace(const struct crush_map *m, void *v);
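/* Illustrative sketch only (not part of the API): a typical caller sizes
 * the working space with crush_work_size(), initializes it once with
 * crush_init_workspace() and then hands it to crush_do_rule(). The names
 * map, ruleno, x, weights and weight_max are assumed to be supplied by
 * the caller; choose_args is NULL here and result_max is 8.
 *
 *   size_t wsize = crush_work_size(map, 8);
 *   void *cwin = malloc(wsize);
 *   if (cwin) {
 *           int result[8];
 *           crush_init_workspace(map, cwin);
 *           int n = crush_do_rule(map, ruleno, x, result, 8,
 *                                 weights, weight_max, cwin, NULL);
 *           /+ result[0..n) now holds the mapped items +/
 *           free(cwin);
 *   }
 */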
#endif
| 3,525 | 36.913978 | 72 | h |
null | ceph-main/src/crypto/crypto_accel.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Mirantis, Inc.
*
* Author: Adam Kupczyk <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CRYPTO_ACCEL_H
#define CRYPTO_ACCEL_H
#include <cstddef>
#include "include/Context.h"
class optional_yield;
class CryptoAccel;
typedef std::shared_ptr<CryptoAccel> CryptoAccelRef;
class CryptoAccel {
public:
CryptoAccel() {}
CryptoAccel(const size_t chunk_size, const size_t max_requests) {}
virtual ~CryptoAccel() {}
static const int AES_256_IVSIZE = 128/8;
static const int AES_256_KEYSIZE = 256/8;
virtual bool cbc_encrypt(unsigned char* out, const unsigned char* in, size_t size,
const unsigned char (&iv)[AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) = 0;
virtual bool cbc_decrypt(unsigned char* out, const unsigned char* in, size_t size,
const unsigned char (&iv)[AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) = 0;
virtual bool cbc_encrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
const unsigned char iv[][AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) = 0;
virtual bool cbc_decrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
const unsigned char iv[][AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) = 0;
};
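/* Illustrative sketch only (assumed caller-side code, not part of this
 * interface): encrypting a buffer with an accelerator obtained from a
 * CryptoPlugin::factory(). The out, in and size names are assumed to be
 * caller buffers, the iv/key values are dummies, and null_yield stands
 * for a plain synchronous call.
 *
 *   CryptoAccelRef accel = ...; // from CryptoPlugin::factory()
 *   unsigned char iv[CryptoAccel::AES_256_IVSIZE] = {0};
 *   unsigned char key[CryptoAccel::AES_256_KEYSIZE] = {0};
 *   bool ok = accel->cbc_encrypt(out, in, size, iv, key, null_yield);
 */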
#endif
| 1,869 | 35.666667 | 90 | h |
null | ceph-main/src/crypto/crypto_plugin.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Mirantis, Inc.
*
* Author: Adam Kupczyk <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CRYPTO_PLUGIN_H
#define CRYPTO_PLUGIN_H
// -----------------------------------------------------------------------------
#include "common/PluginRegistry.h"
#include "ostream"
#include "crypto/crypto_accel.h"
#include <boost/asio/io_context.hpp>
// -----------------------------------------------------------------------------
class CryptoPlugin : public ceph::Plugin {
public:
CryptoAccelRef cryptoaccel;
explicit CryptoPlugin(CephContext* cct) : Plugin(cct)
{}
~CryptoPlugin()
{}
virtual int factory(CryptoAccelRef *cs,
std::ostream *ss,
const size_t chunk_size,
const size_t max_requests) = 0;
};
#endif
| 1,109 | 26.75 | 80 | h |
null | ceph-main/src/crypto/isa-l/isal_crypto_accel.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Mirantis, Inc.
*
* Author: Adam Kupczyk <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef ISAL_CRYPTO_ACCEL_H
#define ISAL_CRYPTO_ACCEL_H
#include "crypto/crypto_accel.h"
#include "common/async/yield_context.h"
class ISALCryptoAccel : public CryptoAccel {
public:
ISALCryptoAccel() {}
virtual ~ISALCryptoAccel() {}
bool cbc_encrypt(unsigned char* out, const unsigned char* in, size_t size,
const unsigned char (&iv)[AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) override;
bool cbc_decrypt(unsigned char* out, const unsigned char* in, size_t size,
const unsigned char (&iv)[AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) override;
bool cbc_encrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
const unsigned char iv[][AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) override { return false; }
bool cbc_decrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
const unsigned char iv[][AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) override { return false; }
};
#endif
| 1,707 | 38.72093 | 82 | h |
null | ceph-main/src/crypto/isa-l/isal_crypto_plugin.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Mirantis, Inc.
*
* Author: Adam Kupczyk <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef ISAL_CRYPTO_PLUGIN_H
#define ISAL_CRYPTO_PLUGIN_H
// -----------------------------------------------------------------------------
#include "crypto/crypto_plugin.h"
#include "crypto/isa-l/isal_crypto_accel.h"
#include "arch/intel.h"
#include "arch/probe.h"
// -----------------------------------------------------------------------------
class ISALCryptoPlugin : public CryptoPlugin {
public:
explicit ISALCryptoPlugin(CephContext* cct) : CryptoPlugin(cct)
{}
~ISALCryptoPlugin()
{}
virtual int factory(CryptoAccelRef *cs,
std::ostream *ss,
const size_t chunk_size,
const size_t max_requests)
{
if (cryptoaccel == nullptr)
{
ceph_arch_probe();
if (ceph_arch_intel_aesni && ceph_arch_intel_sse41) {
cryptoaccel = CryptoAccelRef(new ISALCryptoAccel);
}
}
*cs = cryptoaccel;
return 0;
}
};
#endif
| 1,345 | 25.92 | 80 | h |
null | ceph-main/src/crypto/openssl/openssl_crypto_accel.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Intel Corporation
*
* Author: Qiaowei Ren <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef OPENSSL_CRYPTO_ACCEL_H
#define OPENSSL_CRYPTO_ACCEL_H
#include "crypto/crypto_accel.h"
#include "common/async/yield_context.h"
class OpenSSLCryptoAccel : public CryptoAccel {
public:
OpenSSLCryptoAccel() {}
virtual ~OpenSSLCryptoAccel() {}
bool cbc_encrypt(unsigned char* out, const unsigned char* in, size_t size,
const unsigned char (&iv)[AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) override;
bool cbc_decrypt(unsigned char* out, const unsigned char* in, size_t size,
const unsigned char (&iv)[AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) override;
bool cbc_encrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
const unsigned char iv[][AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) override { return false; }
bool cbc_decrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
const unsigned char iv[][AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) override { return false; }
};
#endif
| 1,725 | 38.227273 | 82 | h |
null | ceph-main/src/crypto/openssl/openssl_crypto_plugin.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Intel Corporation
*
* Author: Qiaowei Ren <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef ISAL_CRYPTO_PLUGIN_H
#define ISAL_CRYPTO_PLUGIN_H
#include "crypto/crypto_plugin.h"
#include "crypto/openssl/openssl_crypto_accel.h"
class OpenSSLCryptoPlugin : public CryptoPlugin {
CryptoAccelRef cryptoaccel;
public:
explicit OpenSSLCryptoPlugin(CephContext* cct) : CryptoPlugin(cct)
{}
int factory(CryptoAccelRef *cs,
std::ostream *ss,
const size_t chunk_size,
const size_t max_requests) override {
if (cryptoaccel == nullptr)
cryptoaccel = CryptoAccelRef(new OpenSSLCryptoAccel);
*cs = cryptoaccel;
return 0;
}
};
#endif
| 1,023 | 24.6 | 70 | h |
null | ceph-main/src/crypto/qat/qat_crypto_accel.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Intel Corporation
*
* Author: Qiaowei Ren <[email protected]>
* Author: Ganesh Mahalingam <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef QAT_CRYPTO_ACCEL_H
#define QAT_CRYPTO_ACCEL_H
#include "crypto/crypto_accel.h"
#include "crypto/qat/qcccrypto.h"
#include "common/async/yield_context.h"
class QccCryptoAccel : public CryptoAccel {
public:
QccCrypto qcccrypto;
QccCryptoAccel(const size_t chunk_size, const size_t max_requests):qcccrypto() { qcccrypto.init(chunk_size, max_requests); };
~QccCryptoAccel() { qcccrypto.destroy(); };
bool cbc_encrypt(unsigned char* out, const unsigned char* in, size_t size,
const unsigned char (&iv)[AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) override { return false; }
bool cbc_decrypt(unsigned char* out, const unsigned char* in, size_t size,
const unsigned char (&iv)[AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) override { return false; }
bool cbc_encrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
const unsigned char iv[][AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) override;
bool cbc_decrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
const unsigned char iv[][AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) override;
};
#endif
| 1,825 | 37.851064 | 129 | h |
null | ceph-main/src/crypto/qat/qat_crypto_plugin.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Intel Corporation
*
* Author: Qiaowei Ren <[email protected]>
* Author: Ganesh Mahalingam <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef QAT_CRYPTO_PLUGIN_H
#define QAT_CRYPTO_PLUGIN_H
#include "crypto/crypto_plugin.h"
#include "crypto/qat/qat_crypto_accel.h"
class QccCryptoPlugin : public CryptoPlugin {
static std::mutex qat_init;
public:
explicit QccCryptoPlugin(CephContext* cct) : CryptoPlugin(cct)
{}
~QccCryptoPlugin()
{}
virtual int factory(CryptoAccelRef *cs, std::ostream *ss, const size_t chunk_size, const size_t max_requests)
{
std::lock_guard<std::mutex> l(qat_init);
if (cryptoaccel == nullptr)
cryptoaccel = CryptoAccelRef(new QccCryptoAccel(chunk_size, max_requests));
*cs = cryptoaccel;
return 0;
}
};
#endif
| 1,117 | 25 | 111 | h |
null | ceph-main/src/crypto/qat/qcccrypto.h | #ifndef QCCCRYPTO_H
#define QCCCRYPTO_H
#include <atomic>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <pthread.h>
#include <thread>
#include <mutex>
#include <queue>
#include <memory>
#include "common/async/yield_context.h"
#include <memory>
#include "common/ceph_mutex.h"
#include <vector>
#include <functional>
#include <span>
#include "boost/circular_buffer.hpp"
#include "boost/asio/thread_pool.hpp"
extern "C" {
#include "cpa.h"
#include "cpa_cy_sym_dp.h"
#include "cpa_cy_im.h"
#include "lac/cpa_cy_sym.h"
#include "lac/cpa_cy_im.h"
#include "qae_mem.h"
#include "icp_sal_user.h"
#include "icp_sal_poll.h"
#include "qae_mem_utils.h"
}
class QccCrypto {
friend class QatCrypto;
size_t chunk_size{0};
size_t max_requests{0};
boost::asio::thread_pool my_pool{1};
boost::circular_buffer<std::function<void(int)>> instance_completions;
template <typename CompletionToken>
auto async_get_instance(CompletionToken&& token);
public:
CpaCySymCipherDirection qcc_op_type;
QccCrypto() {};
~QccCrypto() { destroy(); };
bool init(const size_t chunk_size, const size_t max_requests);
bool destroy();
bool perform_op_batch(unsigned char* out, const unsigned char* in, size_t size,
Cpa8U *iv,
Cpa8U *key,
CpaCySymCipherDirection op_type,
optional_yield y);
private:
// Currently only supporting AES_256_CBC.
// To-Do: Needs to be expanded
static const size_t AES_256_IV_LEN = 16;
static const size_t AES_256_KEY_SIZE = 32;
static const size_t MAX_NUM_SYM_REQ_BATCH = 32;
/*
* Struct to hold an instance of QAT to handle the crypto operations. These
* will be identified at the start and held until the destructor is called
* To-Do:
* The struct was created assuming that we will use all the instances.
* Expand current implementation to allow multiple instances to operate
* independently.
*/
struct QCCINST {
CpaInstanceHandle *cy_inst_handles;
CpaBoolean *is_polled;
Cpa16U num_instances;
} *qcc_inst;
/*
* QAT Crypto Session
* Crypto Session Context and setupdata hold
* priority, type of crypto operation (cipher/chained),
* cipher algorithm (AES, DES, etc),
* single crypto or multi-buffer crypto.
*/
struct QCCSESS {
Cpa32U sess_ctx_sz;
CpaCySymSessionCtx sess_ctx;
} *qcc_sess;
/*
* Cipher Memory Allocations
* Holds bufferlist, flatbuffer, cipher operation data and buffermeta needed
* by QAT to perform the operation. Also buffers for IV, SRC, DEST.
*/
struct QCCOPMEM {
// Op common items
bool is_mem_alloc;
bool op_complete;
CpaCySymDpOpData *sym_op_data[MAX_NUM_SYM_REQ_BATCH];
Cpa8U *src_buff[MAX_NUM_SYM_REQ_BATCH];
Cpa8U *iv_buff[MAX_NUM_SYM_REQ_BATCH];
} *qcc_op_mem;
/*
* Handle queue with free instances to handle op
*/
boost::circular_buffer<int> open_instances;
void QccFreeInstance(int entry);
std::thread qat_poll_thread;
bool thread_stop{false};
/*
* Contiguous Memory Allocator and de-allocator. We are using the usdm
* driver that comes along with QAT to get us direct memory access using
* hugepages.
* To-Do: A kernel based one.
*/
static inline void qcc_contig_mem_free(void **ptr) {
if (*ptr) {
qaeMemFreeNUMA(ptr);
*ptr = NULL;
}
}
static inline CpaStatus qcc_contig_mem_alloc(void **ptr, Cpa32U size, Cpa32U alignment = 1) {
*ptr = qaeMemAllocNUMA(size, 0, alignment);
if (NULL == *ptr)
{
return CPA_STATUS_RESOURCE;
}
return CPA_STATUS_SUCCESS;
}
/*
* Malloc & free calls masked to maintain consistency and future kernel
* alloc support.
*/
static inline void qcc_os_mem_free(void **ptr) {
if (*ptr) {
free(*ptr);
*ptr = NULL;
}
}
static inline CpaStatus qcc_os_mem_alloc(void **ptr, Cpa32U size) {
*ptr = malloc(size);
if (*ptr == NULL)
{
return CPA_STATUS_RESOURCE;
}
return CPA_STATUS_SUCCESS;
}
std::atomic<bool> is_init = { false };
/*
* Function to cleanup memory if constructor fails
*/
void cleanup();
/*
* Crypto Polling Function & helpers
* This helps to retrieve data from the QAT rings and dispatch the
* associated callbacks. For synchronous operation (like this one), the QAT
* library creates an internal callback for the operation.
*/
void poll_instances(void);
std::atomic<size_t> poll_retry_num{0};
bool symPerformOp(int avail_inst,
CpaCySymSessionCtx sessionCtx,
const Cpa8U *pSrc,
Cpa8U *pDst,
Cpa32U size,
Cpa8U *pIv,
Cpa32U ivLen,
optional_yield y);
CpaStatus initSession(CpaInstanceHandle cyInstHandle,
CpaCySymSessionCtx *sessionCtx,
Cpa8U *pCipherKey,
CpaCySymCipherDirection cipherDirection);
CpaStatus updateSession(CpaCySymSessionCtx sessionCtx,
Cpa8U *pCipherKey,
CpaCySymCipherDirection cipherDirection);
};
class QatCrypto {
private:
std::function<void(CpaStatus stat)> completion_handler;
std::atomic<std::size_t> count;
public:
void complete() {
if (--count == 0) {
completion_handler(CPA_STATUS_SUCCESS);
}
return ;
}
QatCrypto () : count(0) {}
QatCrypto (const QatCrypto &qat) = delete;
QatCrypto (QatCrypto &&qat) = delete;
void operator=(const QatCrypto &qat) = delete;
void operator=(QatCrypto &&qat) = delete;
template <typename CompletionToken>
auto async_perform_op(int avail_inst, std::span<CpaCySymDpOpData*> pOpDataVec, CompletionToken&& token);
};
#endif //QCCCRYPTO_H
| 6,100 | 27.376744 | 106 | h |
null | ceph-main/src/dokan/ceph_dokan.h | /*
* Copyright (C) 2021 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#define CEPH_DOKAN_IO_DEFAULT_TIMEOUT 60 * 5 // Seconds
// Avoid conflicting COM types, exposed when using C++.
#define _OLE2_H_
#include <bcrypt.h> // for typedef of NTSTATUS
#include <dokan.h>
struct Config {
bool removable = false;
bool readonly = false;
bool use_win_mount_mgr = false;
bool current_session_only = false;
bool debug = false;
bool dokan_stderr = false;
int operation_timeout = CEPH_DOKAN_IO_DEFAULT_TIMEOUT;
std::wstring mountpoint = L"";
std::string root_path = "/";
std::wstring win_vol_name = L"";
unsigned long win_vol_serial = 0;
unsigned long max_path_len = 256;
mode_t file_mode = 0755;
mode_t dir_mode = 0755;
};
extern Config *g_cfg;
// TODO: list and service commands.
enum class Command {
None,
Version,
Help,
Map,
Unmap,
};
void print_usage();
int parse_args(
std::vector<const char*>& args,
std::ostream *err_msg,
Command *command, Config *cfg);
int set_dokan_options(Config *cfg, PDOKAN_OPTIONS dokan_options);
| 1,274 | 20.982759 | 65 | h |
null | ceph-main/src/dokan/dbg.h | // Various helpers used for debugging purposes, such as functions
// logging certain flags. Since those can be rather verbose, it's
// better if we keep them separate.
#ifndef CEPH_DOKAN_DBG_H
#define CEPH_DOKAN_DBG_H
#include "include/compat.h"
#include <sstream>
#include "ceph_dokan.h"
void print_credentials(
std::ostringstream& Stream,
PDOKAN_FILE_INFO DokanFileInfo);
void print_open_params(
LPCSTR FilePath,
ACCESS_MASK AccessMode,
DWORD FlagsAndAttributes,
ULONG ShareMode,
DWORD CreationDisposition,
ULONG CreateOptions,
PDOKAN_FILE_INFO DokanFileInfo);
#endif // CEPH_DOKAN_DBG_H
| 614 | 21.777778 | 65 | h |
null | ceph-main/src/erasure-code/ErasureCode.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2014 Cloudwatt <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_H
#define CEPH_ERASURE_CODE_H
/*! @file ErasureCode.h
@brief Base class for erasure code plugin implementors
*/
#include "ErasureCodeInterface.h"
namespace ceph {
class ErasureCode : public ErasureCodeInterface {
public:
static const unsigned SIMD_ALIGN;
std::vector<int> chunk_mapping;
ErasureCodeProfile _profile;
// for CRUSH rule
std::string rule_root;
std::string rule_failure_domain;
std::string rule_device_class;
~ErasureCode() override {}
int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override;
const ErasureCodeProfile &get_profile() const override {
return _profile;
}
int create_rule(const std::string &name,
CrushWrapper &crush,
std::ostream *ss) const override;
int sanity_check_k_m(int k, int m, std::ostream *ss);
unsigned int get_coding_chunk_count() const override {
return get_chunk_count() - get_data_chunk_count();
}
virtual int get_sub_chunk_count() override {
return 1;
}
virtual int _minimum_to_decode(const std::set<int> &want_to_read,
const std::set<int> &available_chunks,
std::set<int> *minimum);
int minimum_to_decode(const std::set<int> &want_to_read,
const std::set<int> &available,
std::map<int, std::vector<std::pair<int, int>>> *minimum) override;
int minimum_to_decode_with_cost(const std::set<int> &want_to_read,
const std::map<int, int> &available,
std::set<int> *minimum) override;
int encode_prepare(const bufferlist &raw,
std::map<int, bufferlist> &encoded) const;
int encode(const std::set<int> &want_to_encode,
const bufferlist &in,
std::map<int, bufferlist> *encoded) override;
int decode(const std::set<int> &want_to_read,
const std::map<int, bufferlist> &chunks,
std::map<int, bufferlist> *decoded, int chunk_size) override;
virtual int _decode(const std::set<int> &want_to_read,
const std::map<int, bufferlist> &chunks,
std::map<int, bufferlist> *decoded);
const std::vector<int> &get_chunk_mapping() const override;
int to_mapping(const ErasureCodeProfile &profile,
std::ostream *ss);
static int to_int(const std::string &name,
ErasureCodeProfile &profile,
int *value,
const std::string &default_value,
std::ostream *ss);
static int to_bool(const std::string &name,
ErasureCodeProfile &profile,
bool *value,
const std::string &default_value,
std::ostream *ss);
static int to_string(const std::string &name,
ErasureCodeProfile &profile,
std::string *value,
const std::string &default_value,
std::ostream *ss);
int decode_concat(const std::map<int, bufferlist> &chunks,
bufferlist *decoded) override;
protected:
int parse(const ErasureCodeProfile &profile,
std::ostream *ss);
private:
int chunk_index(unsigned int i) const;
};
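/* Illustrative sketch only (assumed key name and default value): a plugin's
 * parse() typically extracts its parameters from the profile with the
 * static helpers declared above, e.g.
 *
 *   int k;
 *   int err = ErasureCode::to_int("k", profile, &k, "2", ss);
 */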
}
#endif
| 3,679 | 28.206349 | 80 | h |
null | ceph-main/src/erasure-code/ErasureCodeInterface.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013 Cloudwatt <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_INTERFACE_H
#define CEPH_ERASURE_CODE_INTERFACE_H
/*! @file ErasureCodeInterface.h
@brief Interface provided by erasure code plugins
The erasure coded pools rely on plugins implementing
**ErasureCodeInterface** to encode and decode content. All codes
are systematic (i.e. the data is not mangled and can be
reconstructed by concatenating chunks ).
Methods returning an **int** return **0** on success and a
negative value on error. If the value returned on error is not
explained in **ErasureCodeInterface**, the sources or the
documentation of the interface implementer (i.e. the plugin ) must
be read to figure out what it means. It is recommended that each
error code matches an *errno* value that relates to the cause of
the error.
If an object is small enough, the caller can process it with
one call to the **encode** or **decode** method.
+---------------- coded object O -------------------------+
|+----------------+ +----------------+ +----------------+ |
|| chunk 0 | | chunk 1 | | chunk 2 | |
|| [0,N) | | [N,2N) | | [2N,3N) | |
|+----------------+ +----------------+ +----------------+ |
+------^--------------------------------------------------+
|
chunk B / C | offset B % C ( where C is the chunk size )
|
+-----^---- raw object O ----+------+
| B [0,X) | pad |
+----------------------------+------+
The object size is padded so that all chunks are of the same size.
In the example above, if the actual object size was X, then it
will be padded to 2N >= X assuming there are two data chunks (0
and 1) and one coding chunk (2).
For chunks of size C, byte B of the object is found in chunk number
B / C at offset B % C.
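For example (illustrative numbers): if the chunk size C is 4, byte
B = 9 of the object lives in chunk 9 / 4 = 2 at offset 9 % 4 = 1.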
If an object is too large to be encoded in memory, the caller
should divide it in smaller units named **stripes**.
+---------------------- object O -------------------------+
|+----------------+ +----------------+ +----------------+ |
stripe || chunk 0 | | chunk 1 | | chunk 2 | |
0 || [0,N) | | [N,2N) | | [2N,3N) | |
|+----------------+ +----------------+ +----------------+ |
|+----------------+ +----------------+ +----------------+ |
stripe || chunk 0 | | chunk 1 | | chunk 2 | |
1 || [X,M) | | [X+M,X+2M) | | [X+2M,X+3M) | |
|| | | | | | |
|+----------------+ +----------------+ +----------------+ |
| ... |
+---------------------------------------------------------+
The interface does not concern itself with stripes nor does it
impose constraints on the size of each stripe. Variable names in
the interface always use **object** and never use **stripe**.
Assuming the interface implementer provides three data chunks ( K
= 3 ) and two coding chunks ( M = 2 ), a buffer could be encoded as
follows:
~~~~~~~~~~~~~~~~{.c}
set<int> want_to_encode(0, 1, 2, // data chunks
3, 4 // coding chunks
);
bufferlist in = "ABCDEF";
map<int, bufferlist> encoded
encode(want_to_encode, in, &encoded);
encoded[0] == "AB" // data chunk 0
encoded[1] == "CD" // data chunk 1
encoded[2] == "EF" // data chunk 2
encoded[3] // coding chunk 0
encoded[4] // coding chunk 1
~~~~~~~~~~~~~~~~
The **minimum_to_decode_with_cost** method can be used to minimize
the cost of fetching the chunks necessary to retrieve a given
content. For instance, if encoded[2] (contained **EF**) is missing
and accessing encoded[3] (the first coding chunk) is more
expensive than accessing encoded[4] (the second coding chunk),
**minimum_to_decode_with_cost** is expected to choose the second
coding chunk.
~~~~~~~~~~~~~~~~{.c}
set<int> want_to_read(2); // want the chunk containing "EF"
map<int,int> available(
0 => 1, // data chunk 0 : available and costs 1
1 => 1, // data chunk 1 : available and costs 1
// data chunk 2 : missing
3 => 9, // coding chunk 1 : available and costs 9
4 => 1, // coding chunk 2 : available and costs 1
);
set<int> minimum;
minimum_to_decode_with_cost(want_to_read,
available,
&minimum);
minimum == set<int>(0, 1, 4); // NOT set<int>(0, 1, 3);
~~~~~~~~~~~~~~~~
It sets **minimum** with three chunks to reconstruct the desired
data chunk and will pick the second coding chunk ( 4 ) because it
is less expensive ( 1 < 9 ) to retrieve than the first coding
chunk ( 3 ). The caller is responsible for retrieving the chunks
and call **decode** to reconstruct the second data chunk.
~~~~~~~~~~~~~~~~{.c}
map<int,bufferlist> chunks;
for i in minimum.keys():
chunks[i] = fetch_chunk(i); // get chunk from storage
map<int, bufferlist> decoded;
decode(want_to_read, chunks, &decoded);
decoded[2] == "EF"
~~~~~~~~~~~~~~~~
The semantic of the cost value is defined by the caller and must
be known to the implementer. For instance, it may be more
expensive to retrieve two chunks with cost 1 + 9 = 10 than two
chunks with cost 6 + 6 = 12.
*/
#include <map>
#include <set>
#include <vector>
#include <ostream>
#include <memory>
#include <string>
#include "include/buffer_fwd.h"
class CrushWrapper;
namespace ceph {
typedef std::map<std::string,std::string> ErasureCodeProfile;
inline std::ostream& operator<<(std::ostream& out, const ErasureCodeProfile& profile) {
out << "{";
for (ErasureCodeProfile::const_iterator it = profile.begin();
it != profile.end();
++it) {
if (it != profile.begin()) out << ",";
out << it->first << "=" << it->second;
}
out << "}";
return out;
}
class ErasureCodeInterface {
public:
virtual ~ErasureCodeInterface() {}
/**
* Initialize the instance according to the content of
* **profile**. The **ss** stream is set with debug messages or
* error messages, the content of which depend on the
* implementation.
*
* Return 0 on success or a negative errno on error. When
* returning on error, the implementation is expected to
* provide a human readable explanation in **ss**.
*
* @param [in] profile a key/value map
* @param [out] ss contains informative messages when an error occurs
* @return 0 on success or a negative errno on error.
*/
virtual int init(ErasureCodeProfile &profile, std::ostream *ss) = 0;
/**
* Return the profile that was used to initialize the instance
* with the **init** method.
*
* @return the profile in use by the instance
*/
virtual const ErasureCodeProfile &get_profile() const = 0;
/**
* Create a new rule in **crush** under the name **name**,
* unless it already exists.
*
* Return the rule number that was created on success. If a
* rule **name** already exists, return -EEXIST, otherwise
* return a negative value indicating an error with a semantic
* defined by the implementation.
*
* @param [in] name of the rule to create
* @param [in] crush crushmap in which the rule is created
* @param [out] ss contains informative messages when an error occurs
* @return a rule on success or a negative errno on error.
*/
virtual int create_rule(const std::string &name,
CrushWrapper &crush,
std::ostream *ss) const = 0;
/**
* Return the number of chunks created by a call to the **encode**
* method.
*
* In the simplest case it can be K + M, i.e. the number
* of data chunks (K) plus the number of parity chunks
* (M). However, if the implementation provides local parity there
* could be an additional overhead.
*
* @return the number of chunks created by encode()
*/
virtual unsigned int get_chunk_count() const = 0;
/**
* Return the number of data chunks created by a call to the
* **encode** method. The data chunks contain the buffer provided
* to **encode**, verbatim, with padding at the end of the last
* chunk.
*
* @return the number of data chunks created by encode()
*/
virtual unsigned int get_data_chunk_count() const = 0;
/**
* Return the number of coding chunks created by a call to the
* **encode** method. The coding chunks are used to recover from
* the loss of one or more chunks. If there is one coding chunk,
* it is possible to recover from the loss of exactly one
* chunk. If there are two coding chunks, it is possible to
* recover from the loss of at most two chunks, etc.
*
* @return the number of coding chunks created by encode()
*/
virtual unsigned int get_coding_chunk_count() const = 0;
/**
* Return the number of sub-chunks created by a call to the
* **encode** method. Each chunk can be viewed as a union of sub-chunks.
* For the case of array codes, the sub-chunk count is > 1, whereas
* scalar codes have a sub-chunk count of 1.
*
* @return the number of sub-chunks per chunk created by encode()
*/
virtual int get_sub_chunk_count() = 0;
/**
* Return the size (in bytes) of a single chunk created by a call
* to the **decode** method. The returned size multiplied by
* **get_chunk_count()** is greater than or equal to **object_size**.
*
* If the object size is properly aligned, the chunk size is
* **object_size / get_chunk_count()**. However, if
* **object_size** is not a multiple of **get_chunk_count** or if
* the implementation imposes additional alignment constraints,
* the chunk size may be larger.
*
* The byte found at offset **B** of the original object is mapped
* to chunk **B / get_chunk_size()** at offset **B % get_chunk_size()**.
*
* @param [in] object_size the number of bytes of the object to **encode()**
* @return the size (in bytes) of a single chunk created by **encode()**
*/
virtual unsigned int get_chunk_size(unsigned int object_size) const = 0;
/**
* Compute the smallest subset of **available** chunks that needs
* to be retrieved in order to successfully decode
* **want_to_read** chunks.
*
* It is strictly equivalent to calling
* **minimum_to_decode_with_cost** where each **available** chunk
* has the same cost.
*
* @see minimum_to_decode_with_cost
*
* @param [in] want_to_read chunk indexes to be decoded
* @param [in] available chunk indexes containing valid data
* @param [out] minimum chunk indexes and corresponding
* subchunk index offsets, count.
* @return **0** on success or a negative errno on error.
*/
virtual int minimum_to_decode(const std::set<int> &want_to_read,
const std::set<int> &available,
std::map<int, std::vector<std::pair<int, int>>>
*minimum) = 0;
/**
* Compute the smallest subset of **available** chunks that needs
* to be retrieved in order to successfully decode
* **want_to_read** chunks. If there are more than one possible
* subset, select the subset that minimizes the overall retrieval
* cost.
*
* The **available** parameter maps chunk indexes to their
* retrieval cost. The higher the cost value, the more costly it
* is to retrieve the chunk content.
*
* Returns -EIO if there are not enough chunk indexes in
* **available** to decode **want_to_read**.
*
* Returns 0 on success.
*
* The **minimum** argument must be a pointer to an empty set.
*
* @param [in] want_to_read chunk indexes to be decoded
* @param [in] available map chunk indexes containing valid data
* to their retrieval cost
* @param [out] minimum chunk indexes to retrieve
* @return **0** on success or a negative errno on error.
*/
virtual int minimum_to_decode_with_cost(const std::set<int> &want_to_read,
const std::map<int, int> &available,
std::set<int> *minimum) = 0;
/**
* Encode the content of **in** and store the result in
* **encoded**. All buffers pointed to by **encoded** have the
* same size. The **encoded** map contains at least all chunk
* indexes found in the **want_to_encode** set.
*
* The **encoded** map is expected to be a pointer to an empty
* map.
*
* Assuming the **in** parameter is **length** bytes long,
* the concatenation of the first **length** bytes of the
* **encoded** buffers is equal to the content of the **in**
* parameter.
*
* The **encoded** map may contain more chunks than required by
* **want_to_encode** and the caller is expected to permanently
* store all of them, not just the chunks listed in
* **want_to_encode**.
*
* The **encoded** map may contain pointers to data stored in
* the **in** parameter. If the caller modifies the content of
* **in** after calling the encode method, it may have a side
* effect on the content of **encoded**.
*
* The **encoded** map may contain pointers to buffers allocated
* by the encode method. They will be freed when **encoded** is
* freed. The allocation method is not specified.
*
* Returns 0 on success.
*
* @param [in] want_to_encode chunk indexes to be encoded
* @param [in] in data to be encoded
* @param [out] encoded map chunk indexes to chunk data
* @return **0** on success or a negative errno on error.
*/
virtual int encode(const std::set<int> &want_to_encode,
const bufferlist &in,
std::map<int, bufferlist> *encoded) = 0;
virtual int encode_chunks(const std::set<int> &want_to_encode,
std::map<int, bufferlist> *encoded) = 0;
/**
* Decode the **chunks** and store at least **want_to_read**
* chunks in **decoded**.
*
* The **decoded** map must be a pointer to an empty map.
*
   * There must be enough **chunks** (as returned by
   * **minimum_to_decode** or **minimum_to_decode_with_cost**) to
   * perform a successful decoding of all chunks listed in
   * **want_to_read**.
   *
   * All buffers pointed to by **chunks** must have the same size.
*
* On success, the **decoded** map may contain more chunks than
* required by **want_to_read** and they can safely be used by the
* caller.
*
* If a chunk is listed in **want_to_read** and there is a
* corresponding **bufferlist** in **chunks**, it will be
* referenced in **decoded**. If not it will be reconstructed from
* the existing chunks.
*
* Because **decoded** may contain pointers to data found in
* **chunks**, modifying the content of **chunks** after calling
* decode may have a side effect on the content of **decoded**.
*
* Returns 0 on success.
*
* @param [in] want_to_read chunk indexes to be decoded
* @param [in] chunks map chunk indexes to chunk data
* @param [out] decoded map chunk indexes to chunk data
* @param [in] chunk_size chunk size
* @return **0** on success or a negative errno on error.
*/
virtual int decode(const std::set<int> &want_to_read,
const std::map<int, bufferlist> &chunks,
std::map<int, bufferlist> *decoded, int chunk_size) = 0;
virtual int decode_chunks(const std::set<int> &want_to_read,
const std::map<int, bufferlist> &chunks,
std::map<int, bufferlist> *decoded) = 0;
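  /**
   * A minimal usage sketch (illustrative only), continuing the encode
   * example above: chunk 0 is dropped to simulate a lost chunk and then
   * rebuilt from the surviving ones.
   *
   *   std::map<int, bufferlist> chunks(encoded);
   *   chunks.erase(0);                             // simulate the loss
   *   std::set<int> want_to_read = {0};
   *   std::map<int, bufferlist> decoded;
   *   int r = ec->decode(want_to_read, chunks, &decoded,
   *                      encoded[0].length());
   *   // on success decoded[0] holds the reconstructed chunk content
   */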
/**
* Return the ordered list of chunks or an empty vector
* if no remapping is necessary.
*
* By default encoding an object with K=2,M=1 will create three
* chunks, the first two are data and the last one coding. For
* a 10MB object, it would be:
*
* chunk 0 for the first 5MB
* chunk 1 for the last 5MB
* chunk 2 for the 5MB coding chunk
*
* The plugin may, however, decide to remap them in a different
* order, such as:
*
* chunk 0 for the last 5MB
* chunk 1 for the 5MB coding chunk
* chunk 2 for the first 5MB
*
* The vector<int> remaps the chunks so that the first chunks are
* data, in sequential order, and the last chunks contain parity
* in the same order as they were output by the encoding function.
*
* In the example above the mapping would be:
*
* [ 1, 2, 0 ]
*
* The returned vector<int> only contains information for chunks
* that need remapping. If no remapping is necessary, the
* vector<int> is empty.
*
* @return vector<int> list of indices of chunks to be remapped
*/
virtual const std::vector<int> &get_chunk_mapping() const = 0;
/**
* Decode the first **get_data_chunk_count()** **chunks** and
* concatenate them into **decoded**.
*
* Returns 0 on success.
*
* @param [in] chunks map chunk indexes to chunk data
   * @param [out] decoded concatenation of the data chunks
* @return **0** on success or a negative errno on error.
*/
virtual int decode_concat(const std::map<int, bufferlist> &chunks,
bufferlist *decoded) = 0;
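  /**
   * A minimal usage sketch (illustrative only), reusing the surviving
   * **chunks** map from the decode example above to rebuild the whole
   * object in a single call.
   *
   *   bufferlist out;
   *   int r = ec->decode_concat(chunks, &out);
   *   // on success the first in.length() bytes of out equal the
   *   // buffer originally passed to encode
   */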
};
typedef std::shared_ptr<ErasureCodeInterface> ErasureCodeInterfaceRef;
}
#endif
| 18,567 | 38.590618 | 89 | h |
null | ceph-main/src/erasure-code/ErasureCodePlugin.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_PLUGIN_H
#define CEPH_ERASURE_CODE_PLUGIN_H
#include "common/ceph_mutex.h"
#include "ErasureCodeInterface.h"
extern "C" {
const char *__erasure_code_version();
int __erasure_code_init(char *plugin_name, char *directory);
}
namespace ceph {
class ErasureCodePlugin {
public:
void *library;
ErasureCodePlugin() :
library(0) {}
virtual ~ErasureCodePlugin() {}
virtual int factory(const std::string &directory,
ErasureCodeProfile &profile,
ErasureCodeInterfaceRef *erasure_code,
std::ostream *ss) = 0;
};
class ErasureCodePluginRegistry {
public:
ceph::mutex lock = ceph::make_mutex("ErasureCodePluginRegistry::lock");
bool loading = false;
bool disable_dlclose = false;
std::map<std::string,ErasureCodePlugin*> plugins;
static ErasureCodePluginRegistry singleton;
ErasureCodePluginRegistry();
~ErasureCodePluginRegistry();
static ErasureCodePluginRegistry &instance() {
return singleton;
}
int factory(const std::string &plugin,
const std::string &directory,
ErasureCodeProfile &profile,
ErasureCodeInterfaceRef *erasure_code,
std::ostream *ss);
int add(const std::string &name, ErasureCodePlugin *plugin);
int remove(const std::string &name);
ErasureCodePlugin *get(const std::string &name);
int load(const std::string &plugin_name,
const std::string &directory,
ErasureCodePlugin **plugin,
std::ostream *ss);
int preload(const std::string &plugins,
const std::string &directory,
std::ostream *ss);
};
}
#endif
| 2,197 | 25.481928 | 75 | h |
null | ceph-main/src/erasure-code/clay/ErasureCodeClay.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Indian Institute of Science <[email protected]>
*
* Author: Myna Vajha <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_CLAY_H
#define CEPH_ERASURE_CODE_CLAY_H
#include "include/err.h"
#include "include/buffer_fwd.h"
#include "erasure-code/ErasureCode.h"
class ErasureCodeClay final : public ceph::ErasureCode {
public:
std::string DEFAULT_K{"4"};
std::string DEFAULT_M{"2"};
std::string DEFAULT_W{"8"};
int k = 0, m = 0, d = 0, w = 8;
int q = 0, t = 0, nu = 0;
int sub_chunk_no = 0;
std::map<int, ceph::bufferlist> U_buf;
struct ScalarMDS {
ceph::ErasureCodeInterfaceRef erasure_code;
ceph::ErasureCodeProfile profile;
};
ScalarMDS mds;
ScalarMDS pft;
const std::string directory;
explicit ErasureCodeClay(const std::string& dir)
: directory(dir)
{}
~ErasureCodeClay() override;
unsigned int get_chunk_count() const override {
return k+m;
}
unsigned int get_data_chunk_count() const override {
return k;
}
int get_sub_chunk_count() override {
return sub_chunk_no;
}
unsigned int get_chunk_size(unsigned int object_size) const override;
int minimum_to_decode(const std::set<int> &want_to_read,
const std::set<int> &available,
std::map<int, std::vector<std::pair<int, int>>> *minimum) override;
int decode(const std::set<int> &want_to_read,
const std::map<int, ceph::bufferlist> &chunks,
std::map<int, ceph::bufferlist> *decoded, int chunk_size) override;
int encode_chunks(const std::set<int> &want_to_encode,
std::map<int, ceph::bufferlist> *encoded) override;
int decode_chunks(const std::set<int> &want_to_read,
const std::map<int, ceph::bufferlist> &chunks,
std::map<int, ceph::bufferlist> *decoded) override;
int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override;
int is_repair(const std::set<int> &want_to_read,
const std::set<int> &available_chunks);
int get_repair_sub_chunk_count(const std::set<int> &want_to_read);
virtual int parse(ceph::ErasureCodeProfile &profile, std::ostream *ss);
private:
int minimum_to_repair(const std::set<int> &want_to_read,
const std::set<int> &available_chunks,
std::map<int, std::vector<std::pair<int, int>>> *minimum);
int repair(const std::set<int> &want_to_read,
const std::map<int, ceph::bufferlist> &chunks,
std::map<int, ceph::bufferlist> *recovered, int chunk_size);
int decode_layered(std::set<int>& erased_chunks, std::map<int, ceph::bufferlist>* chunks);
int repair_one_lost_chunk(std::map<int, ceph::bufferlist> &recovered_data, std::set<int> &aloof_nodes,
std::map<int, ceph::bufferlist> &helper_data, int repair_blocksize,
std::vector<std::pair<int,int>> &repair_sub_chunks_ind);
void get_repair_subchunks(const int &lost_node,
std::vector<std::pair<int, int>> &repair_sub_chunks_ind);
int decode_erasures(const std::set<int>& erased_chunks, int z,
std::map<int, ceph::bufferlist>* chunks, int sc_size);
int decode_uncoupled(const std::set<int>& erasures, int z, int ss_size);
void set_planes_sequential_decoding_order(int* order, std::set<int>& erasures);
void recover_type1_erasure(std::map<int, ceph::bufferlist>* chunks, int x, int y, int z,
int* z_vec, int sc_size);
void get_uncoupled_from_coupled(std::map<int, ceph::bufferlist>* chunks, int x, int y, int z,
int* z_vec, int sc_size);
void get_coupled_from_uncoupled(std::map<int, ceph::bufferlist>* chunks, int x, int y, int z,
int* z_vec, int sc_size);
void get_plane_vector(int z, int* z_vec);
int get_max_iscore(std::set<int>& erased_chunks);
};
#endif
| 4,307 | 32.92126 | 104 | h |
null | ceph-main/src/erasure-code/clay/ErasureCodePluginClay.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2018 Indian Institute of Science <[email protected]>
*
* Author: Myna Vajha <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_PLUGIN_CLAY_H
#define CEPH_ERASURE_CODE_PLUGIN_CLAY_H
#include "erasure-code/ErasureCodePlugin.h"
class ErasureCodePluginClay : public ceph::ErasureCodePlugin {
public:
int factory(const std::string& directory,
ceph::ErasureCodeProfile &profile,
ceph::ErasureCodeInterfaceRef *erasure_code,
std::ostream *ss) override;
};
#endif
| 920 | 28.709677 | 73 | h |
null | ceph-main/src/erasure-code/isa/ErasureCodeIsa.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 CERN (Switzerland)
*
* Author: Andreas-Joachim Peters <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
/**
 * @file ErasureCodeIsa.h
*
* @brief Erasure Code CODEC using the INTEL ISA-L library.
*
 * The INTEL ISA-L library supports two pre-defined encoding matrices (cauchy and reed_sol_van; reed_sol_van, i.e. Vandermonde, is the default).
 * The default CODEC using these two matrices is implemented in class ErasureCodeIsaDefault.
 * ISA-L also allows the use of custom matrices, which might be added later as implementations deriving from the base class ErasureCodeIsa.
*/
#ifndef CEPH_ERASURE_CODE_ISA_L_H
#define CEPH_ERASURE_CODE_ISA_L_H
// -----------------------------------------------------------------------------
#include "erasure-code/ErasureCode.h"
#include "ErasureCodeIsaTableCache.h"
// -----------------------------------------------------------------------------
class ErasureCodeIsa : public ceph::ErasureCode {
public:
enum eMatrix {
kVandermonde = 0, kCauchy = 1
};
int k;
int m;
int w;
ErasureCodeIsaTableCache &tcache;
const char *technique;
ErasureCodeIsa(const char *_technique,
ErasureCodeIsaTableCache &_tcache) :
k(0),
m(0),
w(0),
tcache(_tcache),
technique(_technique)
{
}
~ErasureCodeIsa() override
{
}
unsigned int
get_chunk_count() const override
{
return k + m;
}
unsigned int
get_data_chunk_count() const override
{
return k;
}
unsigned int get_chunk_size(unsigned int object_size) const override;
int encode_chunks(const std::set<int> &want_to_encode,
std::map<int, ceph::buffer::list> *encoded) override;
int decode_chunks(const std::set<int> &want_to_read,
const std::map<int, ceph::buffer::list> &chunks,
std::map<int, ceph::buffer::list> *decoded) override;
int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override;
virtual void isa_encode(char **data,
char **coding,
int blocksize) = 0;
virtual int isa_decode(int *erasures,
char **data,
char **coding,
int blocksize) = 0;
virtual unsigned get_alignment() const = 0;
virtual void prepare() = 0;
private:
virtual int parse(ceph::ErasureCodeProfile &profile,
std::ostream *ss) = 0;
};
// -----------------------------------------------------------------------------
class ErasureCodeIsaDefault : public ErasureCodeIsa {
private:
int matrixtype;
public:
static const std::string DEFAULT_K;
static const std::string DEFAULT_M;
unsigned char* encode_coeff; // encoding coefficient
unsigned char* encode_tbls; // encoding table
ErasureCodeIsaDefault(ErasureCodeIsaTableCache &_tcache,
int matrix = kVandermonde) :
ErasureCodeIsa("default", _tcache),
encode_coeff(0), encode_tbls(0)
{
matrixtype = matrix;
}
~ErasureCodeIsaDefault() override
{
}
void isa_encode(char **data,
char **coding,
int blocksize) override;
virtual bool erasure_contains(int *erasures, int i);
int isa_decode(int *erasures,
char **data,
char **coding,
int blocksize) override;
unsigned get_alignment() const override;
void prepare() override;
private:
int parse(ceph::ErasureCodeProfile &profile,
std::ostream *ss) override;
};
#endif
| 3,893 | 24.285714 | 129 | h |
null | ceph-main/src/erasure-code/isa/ErasureCodeIsaTableCache.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 CERN (Switzerland)
*
* Author: Andreas-Joachim Peters <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
/**
* @file ErasureCodeIsaTableCache.h
*
* @brief Erasure Code Isa CODEC Table Cache
*
 * The INTEL ISA-L library supports two pre-defined encoding matrices (cauchy and reed_sol_van; reed_sol_van, i.e. Vandermonde, is the default).
 * The default CODEC using these two matrices is implemented in class ErasureCodeIsaDefault.
 * ISA-L also allows the use of custom matrices, which might be added later as implementations deriving from the base class ErasureCodeIsa.
*/
#ifndef CEPH_ERASURE_CODE_ISA_TABLE_CACHE_H
#define CEPH_ERASURE_CODE_ISA_TABLE_CACHE_H
// -----------------------------------------------------------------------------
#include "common/ceph_mutex.h"
#include "erasure-code/ErasureCodeInterface.h"
// -----------------------------------------------------------------------------
#include <list>
// -----------------------------------------------------------------------------
class ErasureCodeIsaTableCache {
// ---------------------------------------------------------------------------
// This class implements a table cache for encoding and decoding matrices.
// Encoding matrices are shared for the same (k,m) combination. It supplies
// a decoding matrix lru cache which is shared for identical
// matrix types e.g. there is one cache (lru-list + lru-map) for Cauchy and
// one for Vandermonde matrices!
// ---------------------------------------------------------------------------
public:
// the cache size is sufficient up to (12,4) decodings
static const int decoding_tables_lru_length = 2516;
typedef std::pair<std::list<std::string>::iterator, ceph::buffer::ptr> lru_entry_t;
typedef std::map< int, unsigned char** > codec_table_t;
typedef std::map< int, codec_table_t > codec_tables_t;
typedef std::map< int, codec_tables_t > codec_technique_tables_t;
typedef std::map< std::string, lru_entry_t > lru_map_t;
typedef std::list< std::string > lru_list_t;
ErasureCodeIsaTableCache() = default;
virtual ~ErasureCodeIsaTableCache();
// mutex used to protect modifications in encoding/decoding table maps
ceph::mutex codec_tables_guard = ceph::make_mutex("isa-lru-cache");
bool getDecodingTableFromCache(std::string &signature,
unsigned char* &table,
int matrixtype,
int k,
int m);
void putDecodingTableToCache(std::string&,
unsigned char*&,
int matrixtype,
int k,
int m);
unsigned char** getEncodingTable(int matrix, int k, int m);
unsigned char** getEncodingCoefficient(int matrix, int k, int m);
unsigned char** getEncodingTableNoLock(int matrix, int k, int m);
unsigned char** getEncodingCoefficientNoLock(int matrix, int k, int m);
unsigned char* setEncodingTable(int matrix, int k, int m, unsigned char*);
unsigned char* setEncodingCoefficient(int matrix, int k, int m, unsigned char*);
int getDecodingTableCacheSize(int matrixtype = 0);
private:
codec_technique_tables_t encoding_coefficient; // encoding coefficients accessed via table[matrix][k][m]
codec_technique_tables_t encoding_table; // encoding coefficients accessed via table[matrix][k][m]
std::map<int, lru_map_t*> decoding_tables; // decoding table cache accessed via map[matrixtype]
std::map<int, lru_list_t*> decoding_tables_lru; // decoding table lru list accessed via list[matrixtype]
lru_map_t* getDecodingTables(int matrix_type);
lru_list_t* getDecodingTablesLru(int matrix_type);
ceph::mutex* getLock();
};
#endif
| 4,080 | 38.240385 | 129 | h |
null | ceph-main/src/erasure-code/isa/ErasureCodePluginIsa.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_PLUGIN_ISA_H
#define CEPH_ERASURE_CODE_PLUGIN_ISA_H
#include "erasure-code/ErasureCodePlugin.h"
#include "ErasureCodeIsaTableCache.h"
class ErasureCodePluginIsa : public ceph::ErasureCodePlugin {
public:
ErasureCodeIsaTableCache tcache;
int factory(const std::string &directory,
ceph::ErasureCodeProfile &profile,
ceph::ErasureCodeInterfaceRef *erasure_code,
std::ostream *ss) override;
};
#endif
| 1,030 | 28.457143 | 71 | h |
null | ceph-main/src/erasure-code/jerasure/ErasureCodeJerasure.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013, 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_JERASURE_H
#define CEPH_ERASURE_CODE_JERASURE_H
#include "erasure-code/ErasureCode.h"
class ErasureCodeJerasure : public ceph::ErasureCode {
public:
int k;
std::string DEFAULT_K;
int m;
std::string DEFAULT_M;
int w;
std::string DEFAULT_W;
const char *technique;
std::string rule_root;
std::string rule_failure_domain;
bool per_chunk_alignment;
explicit ErasureCodeJerasure(const char *_technique) :
k(0),
DEFAULT_K("2"),
m(0),
DEFAULT_M("1"),
w(0),
DEFAULT_W("8"),
technique(_technique),
per_chunk_alignment(false)
{}
~ErasureCodeJerasure() override {}
unsigned int get_chunk_count() const override {
return k + m;
}
unsigned int get_data_chunk_count() const override {
return k;
}
unsigned int get_chunk_size(unsigned int object_size) const override;
int encode_chunks(const std::set<int> &want_to_encode,
std::map<int, ceph::buffer::list> *encoded) override;
int decode_chunks(const std::set<int> &want_to_read,
const std::map<int, ceph::buffer::list> &chunks,
std::map<int, ceph::buffer::list> *decoded) override;
int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override;
virtual void jerasure_encode(char **data,
char **coding,
int blocksize) = 0;
virtual int jerasure_decode(int *erasures,
char **data,
char **coding,
int blocksize) = 0;
virtual unsigned get_alignment() const = 0;
virtual void prepare() = 0;
static bool is_prime(int value);
protected:
virtual int parse(ceph::ErasureCodeProfile &profile, std::ostream *ss);
};
class ErasureCodeJerasureReedSolomonVandermonde : public ErasureCodeJerasure {
public:
int *matrix;
ErasureCodeJerasureReedSolomonVandermonde() :
ErasureCodeJerasure("reed_sol_van"),
matrix(0)
{
DEFAULT_K = "7";
DEFAULT_M = "3";
DEFAULT_W = "8";
}
~ErasureCodeJerasureReedSolomonVandermonde() override {
if (matrix)
free(matrix);
}
void jerasure_encode(char **data,
char **coding,
int blocksize) override;
int jerasure_decode(int *erasures,
char **data,
char **coding,
int blocksize) override;
unsigned get_alignment() const override;
void prepare() override;
private:
int parse(ceph::ErasureCodeProfile& profile, std::ostream *ss) override;
};
class ErasureCodeJerasureReedSolomonRAID6 : public ErasureCodeJerasure {
public:
int *matrix;
ErasureCodeJerasureReedSolomonRAID6() :
ErasureCodeJerasure("reed_sol_r6_op"),
matrix(0)
{
DEFAULT_K = "7";
DEFAULT_M = "2";
DEFAULT_W = "8";
}
~ErasureCodeJerasureReedSolomonRAID6() override {
if (matrix)
free(matrix);
}
void jerasure_encode(char **data,
char **coding,
int blocksize) override;
int jerasure_decode(int *erasures,
char **data,
char **coding,
int blocksize) override;
unsigned get_alignment() const override;
void prepare() override;
private:
int parse(ceph::ErasureCodeProfile& profile, std::ostream *ss) override;
};
#define DEFAULT_PACKETSIZE "2048"
class ErasureCodeJerasureCauchy : public ErasureCodeJerasure {
public:
int *bitmatrix;
int **schedule;
int packetsize;
explicit ErasureCodeJerasureCauchy(const char *technique) :
ErasureCodeJerasure(technique),
bitmatrix(0),
schedule(0),
packetsize(0)
{
DEFAULT_K = "7";
DEFAULT_M = "3";
DEFAULT_W = "8";
}
~ErasureCodeJerasureCauchy() override;
void jerasure_encode(char **data,
char **coding,
int blocksize) override;
int jerasure_decode(int *erasures,
char **data,
char **coding,
int blocksize) override;
unsigned get_alignment() const override;
void prepare_schedule(int *matrix);
private:
int parse(ceph::ErasureCodeProfile& profile, std::ostream *ss) override;
};
class ErasureCodeJerasureCauchyOrig : public ErasureCodeJerasureCauchy {
public:
ErasureCodeJerasureCauchyOrig() :
ErasureCodeJerasureCauchy("cauchy_orig")
{}
void prepare() override;
};
class ErasureCodeJerasureCauchyGood : public ErasureCodeJerasureCauchy {
public:
ErasureCodeJerasureCauchyGood() :
ErasureCodeJerasureCauchy("cauchy_good")
{}
void prepare() override;
};
class ErasureCodeJerasureLiberation : public ErasureCodeJerasure {
public:
int *bitmatrix;
int **schedule;
int packetsize;
explicit ErasureCodeJerasureLiberation(const char *technique = "liberation") :
ErasureCodeJerasure(technique),
bitmatrix(0),
schedule(0),
packetsize(0)
{
DEFAULT_K = "2";
DEFAULT_M = "2";
DEFAULT_W = "7";
}
~ErasureCodeJerasureLiberation() override;
void jerasure_encode(char **data,
char **coding,
int blocksize) override;
int jerasure_decode(int *erasures,
char **data,
char **coding,
int blocksize) override;
unsigned get_alignment() const override;
virtual bool check_k(std::ostream *ss) const;
virtual bool check_w(std::ostream *ss) const;
virtual bool check_packetsize_set(std::ostream *ss) const;
virtual bool check_packetsize(std::ostream *ss) const;
virtual int revert_to_default(ceph::ErasureCodeProfile& profile,
std::ostream *ss);
void prepare() override;
private:
int parse(ceph::ErasureCodeProfile& profile, std::ostream *ss) override;
};
class ErasureCodeJerasureBlaumRoth : public ErasureCodeJerasureLiberation {
public:
ErasureCodeJerasureBlaumRoth() :
ErasureCodeJerasureLiberation("blaum_roth")
{
}
bool check_w(std::ostream *ss) const override;
void prepare() override;
};
class ErasureCodeJerasureLiber8tion : public ErasureCodeJerasureLiberation {
public:
ErasureCodeJerasureLiber8tion() :
ErasureCodeJerasureLiberation("liber8tion")
{
DEFAULT_K = "2";
DEFAULT_M = "2";
DEFAULT_W = "8";
}
void prepare() override;
private:
int parse(ceph::ErasureCodeProfile& profile, std::ostream *ss) override;
};
#endif
| 7,164 | 26.988281 | 80 | h |
null | ceph-main/src/erasure-code/jerasure/ErasureCodePluginJerasure.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_PLUGIN_JERASURE_H
#define CEPH_ERASURE_CODE_PLUGIN_JERASURE_H
#include "erasure-code/ErasureCodePlugin.h"
class ErasureCodePluginJerasure : public ceph::ErasureCodePlugin {
public:
int factory(const std::string& directory,
ceph::ErasureCodeProfile &profile,
ceph::ErasureCodeInterfaceRef *erasure_code,
std::ostream *ss) override;
};
#endif
| 971 | 29.375 | 71 | h |
null | ceph-main/src/erasure-code/jerasure/jerasure_init.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013, 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_JERASURE_INIT_H
#define CEPH_JERASURE_INIT_H
extern "C" int jerasure_init(int count, int *words);
#endif
| 705 | 27.24 | 71 | h |
null | ceph-main/src/erasure-code/lrc/ErasureCodeLrc.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_LRC_H
#define CEPH_ERASURE_CODE_LRC_H
#include "include/err.h"
#include "json_spirit/json_spirit.h"
#include "erasure-code/ErasureCode.h"
#define ERROR_LRC_ARRAY -(MAX_ERRNO + 1)
#define ERROR_LRC_OBJECT -(MAX_ERRNO + 2)
#define ERROR_LRC_INT -(MAX_ERRNO + 3)
#define ERROR_LRC_STR -(MAX_ERRNO + 4)
#define ERROR_LRC_PLUGIN -(MAX_ERRNO + 5)
#define ERROR_LRC_DESCRIPTION -(MAX_ERRNO + 6)
#define ERROR_LRC_PARSE_JSON -(MAX_ERRNO + 7)
#define ERROR_LRC_MAPPING -(MAX_ERRNO + 8)
#define ERROR_LRC_MAPPING_SIZE -(MAX_ERRNO + 9)
#define ERROR_LRC_FIRST_MAPPING -(MAX_ERRNO + 10)
#define ERROR_LRC_COUNT_CONSTRAINT -(MAX_ERRNO + 11)
#define ERROR_LRC_CONFIG_OPTIONS -(MAX_ERRNO + 12)
#define ERROR_LRC_LAYERS_COUNT -(MAX_ERRNO + 13)
#define ERROR_LRC_RULE_OP -(MAX_ERRNO + 14)
#define ERROR_LRC_RULE_TYPE -(MAX_ERRNO + 15)
#define ERROR_LRC_RULE_N -(MAX_ERRNO + 16)
#define ERROR_LRC_ALL_OR_NOTHING -(MAX_ERRNO + 17)
#define ERROR_LRC_GENERATED -(MAX_ERRNO + 18)
#define ERROR_LRC_K_M_MODULO -(MAX_ERRNO + 19)
#define ERROR_LRC_K_MODULO -(MAX_ERRNO + 20)
#define ERROR_LRC_M_MODULO -(MAX_ERRNO + 21)
class ErasureCodeLrc final : public ceph::ErasureCode {
public:
static const std::string DEFAULT_KML;
struct Layer {
explicit Layer(const std::string &_chunks_map) : chunks_map(_chunks_map) { }
ceph::ErasureCodeInterfaceRef erasure_code;
std::vector<int> data;
std::vector<int> coding;
std::vector<int> chunks;
std::set<int> chunks_as_set;
std::string chunks_map;
ceph::ErasureCodeProfile profile;
};
std::vector<Layer> layers;
std::string directory;
unsigned int chunk_count;
unsigned int data_chunk_count;
std::string rule_root;
std::string rule_device_class;
struct Step {
Step(const std::string &_op, const std::string &_type, int _n) :
op(_op),
type(_type),
n(_n) {}
std::string op;
std::string type;
int n;
};
std::vector<Step> rule_steps;
explicit ErasureCodeLrc(const std::string &dir)
: directory(dir),
chunk_count(0), data_chunk_count(0), rule_root("default")
{
rule_steps.push_back(Step("chooseleaf", "host", 0));
}
~ErasureCodeLrc() override {}
std::set<int> get_erasures(const std::set<int> &need,
const std::set<int> &available) const;
int _minimum_to_decode(const std::set<int> &want_to_read,
const std::set<int> &available,
std::set<int> *minimum) override;
int create_rule(const std::string &name,
CrushWrapper &crush,
std::ostream *ss) const override;
unsigned int get_chunk_count() const override {
return chunk_count;
}
unsigned int get_data_chunk_count() const override {
return data_chunk_count;
}
unsigned int get_chunk_size(unsigned int object_size) const override;
int encode_chunks(const std::set<int> &want_to_encode,
std::map<int, ceph::buffer::list> *encoded) override;
int decode_chunks(const std::set<int> &want_to_read,
const std::map<int, ceph::buffer::list> &chunks,
std::map<int, ceph::buffer::list> *decoded) override;
int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override;
virtual int parse(ceph::ErasureCodeProfile &profile, std::ostream *ss);
int parse_kml(ceph::ErasureCodeProfile &profile, std::ostream *ss);
int parse_rule(ceph::ErasureCodeProfile &profile, std::ostream *ss);
int parse_rule_step(const std::string &description_string,
json_spirit::mArray description,
std::ostream *ss);
int layers_description(const ceph::ErasureCodeProfile &profile,
json_spirit::mArray *description,
std::ostream *ss) const;
int layers_parse(const std::string &description_string,
json_spirit::mArray description,
std::ostream *ss);
int layers_init(std::ostream *ss);
int layers_sanity_checks(const std::string &description_string,
std::ostream *ss) const;
};
#endif
| 4,502 | 31.395683 | 80 | h |
null | ceph-main/src/erasure-code/lrc/ErasureCodePluginLrc.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_PLUGIN_LRC_H
#define CEPH_ERASURE_CODE_PLUGIN_LRC_H
#include "erasure-code/ErasureCodePlugin.h"
class ErasureCodePluginLrc : public ceph::ErasureCodePlugin {
public:
int factory(const std::string &directory,
ceph::ErasureCodeProfile &profile,
ceph::ErasureCodeInterfaceRef *erasure_code,
std::ostream *ss) override;
};
#endif
| 956 | 28.90625 | 71 | h |
null | ceph-main/src/erasure-code/shec/ErasureCodePluginShec.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_PLUGIN_SHEC_H
#define CEPH_ERASURE_CODE_PLUGIN_SHEC_H
#include "ErasureCodeShecTableCache.h"
#include "erasure-code/ErasureCodePlugin.h"
class ErasureCodePluginShec : public ceph::ErasureCodePlugin {
public:
ErasureCodeShecTableCache tcache;
int factory(const std::string &directory,
ceph::ErasureCodeProfile &profile,
ceph::ErasureCodeInterfaceRef *erasure_code,
std::ostream *ss) override;
};
#endif
| 1,035 | 28.6 | 71 | h |
null | ceph-main/src/erasure-code/shec/ErasureCodeShec.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 FUJITSU LIMITED
* Copyright (C) 2013, 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Takanori Nakao <[email protected]>
* Author: Takeshi Miyamae <[email protected]>
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_SHEC_H
#define CEPH_ERASURE_CODE_SHEC_H
#include "erasure-code/ErasureCode.h"
#include "ErasureCodeShecTableCache.h"
class ErasureCodeShec : public ceph::ErasureCode {
public:
enum {
MULTIPLE = 0,
SINGLE = 1
};
ErasureCodeShecTableCache &tcache;
int k;
int DEFAULT_K;
int m;
int DEFAULT_M;
int c;
int DEFAULT_C;
int w;
int DEFAULT_W;
int technique;
int *matrix;
ErasureCodeShec(const int _technique,
ErasureCodeShecTableCache &_tcache) :
tcache(_tcache),
k(0),
DEFAULT_K(4),
m(0),
DEFAULT_M(3),
c(0),
DEFAULT_C(2),
w(0),
DEFAULT_W(8),
technique(_technique),
matrix(0)
{}
~ErasureCodeShec() override {}
unsigned int get_chunk_count() const override {
return k + m;
}
unsigned int get_data_chunk_count() const override {
return k;
}
unsigned int get_chunk_size(unsigned int object_size) const override;
int _minimum_to_decode(const std::set<int> &want_to_read,
const std::set<int> &available_chunks,
std::set<int> *minimum);
int minimum_to_decode_with_cost(const std::set<int> &want_to_read,
const std::map<int, int> &available,
std::set<int> *minimum) override;
int encode(const std::set<int> &want_to_encode,
const ceph::buffer::list &in,
std::map<int, ceph::buffer::list> *encoded) override;
int encode_chunks(const std::set<int> &want_to_encode,
std::map<int, ceph::buffer::list> *encoded) override;
int _decode(const std::set<int> &want_to_read,
const std::map<int, ceph::buffer::list> &chunks,
std::map<int, ceph::buffer::list> *decoded) override;
int decode_chunks(const std::set<int> &want_to_read,
const std::map<int, ceph::buffer::list> &chunks,
std::map<int, ceph::buffer::list> *decoded) override;
int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override;
virtual void shec_encode(char **data,
char **coding,
int blocksize) = 0;
virtual int shec_decode(int *erasures,
int *avails,
char **data,
char **coding,
int blocksize) = 0;
virtual unsigned get_alignment() const = 0;
virtual void prepare() = 0;
virtual int shec_matrix_decode(int *erased, int *avails,
char **data_ptrs, char **coding_ptrs, int size);
virtual int* shec_reedsolomon_coding_matrix(int is_single);
private:
virtual int parse(const ceph::ErasureCodeProfile &profile) = 0;
virtual double shec_calc_recovery_efficiency1(int k, int m1, int m2, int c1, int c2);
virtual int shec_make_decoding_matrix(bool prepare,
int *want, int *avails,
int *decoding_matrix,
int *dm_row, int *dm_column,
int *minimum);
};
class ErasureCodeShecReedSolomonVandermonde final : public ErasureCodeShec {
public:
ErasureCodeShecReedSolomonVandermonde(ErasureCodeShecTableCache &_tcache,
int technique = MULTIPLE) :
ErasureCodeShec(technique, _tcache)
{}
~ErasureCodeShecReedSolomonVandermonde() override {
}
void shec_encode(char **data,
char **coding,
int blocksize) override;
int shec_decode(int *erasures,
int *avails,
char **data,
char **coding,
int blocksize) override;
unsigned get_alignment() const override;
void prepare() override;
private:
int parse(const ceph::ErasureCodeProfile &profile) override;
};
#endif
| 4,271 | 27.864865 | 87 | h |
null | ceph-main/src/erasure-code/shec/ErasureCodeShecTableCache.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 FUJITSU LIMITED
* Copyright (C) 2014 CERN (Switzerland)
*
* Author: Takanori Nakao <[email protected]>
* Author: Takeshi Miyamae <[email protected]>
* Author: Andreas-Joachim Peters <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_SHEC_TABLE_CACHE_H
#define CEPH_ERASURE_CODE_SHEC_TABLE_CACHE_H
// -----------------------------------------------------------------------------
#include "common/ceph_mutex.h"
#include "erasure-code/ErasureCodeInterface.h"
// -----------------------------------------------------------------------------
#include <list>
// -----------------------------------------------------------------------------
class ErasureCodeShecTableCache {
// ---------------------------------------------------------------------------
// This class implements a table cache for encoding and decoding matrices.
// Encoding matrices are shared for the same (k,m,c,w) combination.
// It supplies a decoding matrix lru cache which is shared for identical
// matrix types e.g. there is one cache (lru-list + lru-map)
// ---------------------------------------------------------------------------
class DecodingCacheParameter {
public:
int* decoding_matrix; // size: k*k
int* dm_row; // size: k
int* dm_column; // size: k
int* minimum; // size: k+m
DecodingCacheParameter() {
decoding_matrix = 0;
dm_row = 0;
dm_column = 0;
minimum = 0;
}
~DecodingCacheParameter() {
if (decoding_matrix) {
delete[] decoding_matrix;
}
if (dm_row) {
delete[] dm_row;
}
if (dm_column) {
delete[] dm_column;
}
if (minimum) {
delete[] minimum;
}
}
};
public:
static const int decoding_tables_lru_length = 10000;
typedef std::pair<std::list<uint64_t>::iterator,
DecodingCacheParameter> lru_entry_t;
typedef std::map< int, int** > codec_table_t;
typedef std::map< int, codec_table_t > codec_tables_t__;
typedef std::map< int, codec_tables_t__ > codec_tables_t_;
typedef std::map< int, codec_tables_t_ > codec_tables_t;
typedef std::map< int, codec_tables_t > codec_technique_tables_t;
// int** matrix = codec_technique_tables_t[technique][k][m][c][w]
typedef std::map< uint64_t, lru_entry_t > lru_map_t;
typedef std::list< uint64_t > lru_list_t;
ErasureCodeShecTableCache() = default;
virtual ~ErasureCodeShecTableCache();
// mutex used to protect modifications in encoding/decoding table maps
ceph::mutex codec_tables_guard = ceph::make_mutex("shec-lru-cache");
bool getDecodingTableFromCache(int* matrix,
int* dm_row, int* dm_column,
int* minimum,
int technique,
int k, int m, int c, int w,
int* want, int* avails);
void putDecodingTableToCache(int* matrix,
int* dm_row, int* dm_column,
int* minimum,
int technique,
int k, int m, int c, int w,
int* want, int* avails);
int** getEncodingTable(int technique, int k, int m, int c, int w);
int** getEncodingTableNoLock(int technique, int k, int m, int c, int w);
int* setEncodingTable(int technique, int k, int m, int c, int w, int*);
private:
// encoding table accessed via table[matrix][k][m][c][w]
// decoding table cache accessed via map[matrixtype]
// decoding table lru list accessed via list[matrixtype]
codec_technique_tables_t encoding_table;
std::map<int, lru_map_t*> decoding_tables;
std::map<int, lru_list_t*> decoding_tables_lru;
lru_map_t* getDecodingTables(int technique);
lru_list_t* getDecodingTablesLru(int technique);
uint64_t getDecodingCacheSignature(int k, int m, int c, int w,
int *want, int *avails);
ceph::mutex* getLock();
};
#endif
| 4,490 | 36.115702 | 80 | h |
null | ceph-main/src/erasure-code/shec/determinant.c | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Fujitsu Laboratories
*
* Author: Takanori Nakao <[email protected]>
* Author: Takeshi Miyamae <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "jerasure/include/galois.h"
void print_matrix(int *mat, int dim)
{
int i, j;
for (i=0; i<dim; i++) {
for (j=0; j<dim; j++) {
printf("%d ", mat[i*dim+j]);
}
printf("\n");
}
}
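/*
 * calc_determinant: compute the determinant of a dim x dim matrix over
 * GF(2^8) by Gaussian elimination, using jerasure's galois helpers for
 * the field divisions and multiplications. Row swaps need no sign
 * bookkeeping because addition and subtraction are both XOR in a field
 * of characteristic 2; a return value of 0 means the matrix is
 * singular.
 */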
int calc_determinant(int *matrix, int dim)
{
int i, j, k, *mat, det = 1, coeff_1, coeff_2, *row;
// print_matrix(matrix, dim);
mat = (int *)malloc(sizeof(int)*dim*dim);
if (mat == NULL) {
printf("mat malloc err\n");
goto out0;
}
memcpy((int *)mat, (int *)matrix, sizeof(int)*dim*dim);
row = (int *)malloc(sizeof(int)*dim);
if (row == NULL) {
printf("row malloc err\n");
goto out1;
}
for (i=0; i<dim; i++) {
if (mat[i*dim+i] == 0) {
for (k=i+1; k<dim; k++) {
if (mat[k*dim+i] != 0) {
memcpy((int *)row, (int *)&mat[k*dim], sizeof(int)*dim);
memcpy((int *)&mat[k*dim], (int *)&mat[i*dim], sizeof(int)*dim);
memcpy((int *)&mat[i*dim], (int *)row, sizeof(int)*dim);
break;
}
}
if (k == dim) {
det = 0;
goto out2;
}
}
coeff_1 = mat[i*dim+i];
for (j=i; j<dim; j++) {
mat[i*dim+j] = galois_single_divide(mat[i*dim+j], coeff_1, 8);
}
for (k=i+1; k<dim; k++) {
if (mat[k*dim+i] != 0) {
coeff_2 = mat[k*dim+i];
for (j=i; j<dim; j++) {
mat[k*dim+j] = mat[k*dim+j] ^ galois_single_multiply(mat[i*dim+j], coeff_2, 8);
}
}
}
det = galois_single_multiply(det, coeff_1, 8);
}
// print_matrix(mat, dim);
out2:
free(row);
out1:
free(mat);
out0:
return det;
}
| 2,145 | 21.589474 | 82 | c |
null | ceph-main/src/exporter/DaemonMetricCollector.h | #pragma once
#include "common/admin_socket_client.h"
#include <map>
#include <string>
#include <vector>
#include <boost/asio.hpp>
#include <boost/json/object.hpp>
#include <filesystem>
struct pstat {
unsigned long utime;
unsigned long stime;
unsigned long minflt;
unsigned long majflt;
unsigned long start_time;
int num_threads;
unsigned long vm_size;
int resident_size;
};
class MetricsBuilder;
class OrderedMetricsBuilder;
class UnorderedMetricsBuilder;
class Metric;
typedef std::map<std::string, std::string> labels_t;
class DaemonMetricCollector {
public:
void main();
std::string get_metrics();
private:
std::map<std::string, AdminSocketClient> clients;
std::string metrics;
std::mutex metrics_mutex;
std::unique_ptr<MetricsBuilder> builder;
void update_sockets();
void request_loop(boost::asio::steady_timer &timer);
void dump_asok_metrics();
void dump_asok_metric(boost::json::object perf_info,
boost::json::value perf_values, std::string name,
labels_t labels);
std::pair<labels_t, std::string>
get_labels_and_metric_name(std::string daemon_name, std::string metric_name);
std::pair<labels_t, std::string> add_fixed_name_metrics(std::string metric_name);
void get_process_metrics(std::vector<std::pair<std::string, int>> daemon_pids);
std::string asok_request(AdminSocketClient &asok, std::string command, std::string daemon_name);
};
class Metric {
private:
struct metric_entry {
labels_t labels;
std::string value;
};
std::string name;
std::string mtype;
std::string description;
std::vector<metric_entry> entries;
public:
Metric(std::string name, std::string mtype, std::string description)
: name(name), mtype(mtype), description(description) {}
Metric(const Metric &) = default;
Metric() = default;
void add(labels_t labels, std::string value);
std::string dump();
};
class MetricsBuilder {
public:
virtual ~MetricsBuilder() = default;
virtual std::string dump() = 0;
virtual void add(std::string value, std::string name, std::string description,
std::string mtype, labels_t labels) = 0;
protected:
std::string out;
};
class OrderedMetricsBuilder : public MetricsBuilder {
private:
std::map<std::string, Metric> metrics;
public:
std::string dump();
void add(std::string value, std::string name, std::string description,
std::string mtype, labels_t labels);
};
class UnorderedMetricsBuilder : public MetricsBuilder {
public:
std::string dump();
void add(std::string value, std::string name, std::string description,
std::string mtype, labels_t labels);
};
DaemonMetricCollector &collector_instance();
| 2,770 | 25.141509 | 98 | h |
null | ceph-main/src/exporter/util.h | #include "common/hostname.h"
#include <chrono>
#include <string>
#define TIMED_FUNCTION() BlockTimer timer(__FILE__, __FUNCTION__)
class BlockTimer {
public:
BlockTimer(std::string file, std::string function);
~BlockTimer();
void stop();
double get_ms();
private:
std::chrono::duration<double, std::milli> ms;
std::string file, function;
bool stopped;
std::chrono::time_point<std::chrono::high_resolution_clock> t1, t2;
};
bool string_is_digit(std::string s);
std::string read_file_to_string(std::string path);
std::string get_hostname(std::string path);
void promethize(std::string &name);
| 606 | 23.28 | 68 | h |
null | ceph-main/src/extblkdev/ExtBlkDevInterface.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* (C) Copyright IBM Corporation 2022
* Author: Martin Ohmacht <[email protected]>
*
* Based on the file ceph/src/erasure-code/ErasureCodeInterface.h
* Copyright (C) 2013 Cloudwatt <[email protected]>
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_EXT_BLK_DEV_INTERFACE_H
#define CEPH_EXT_BLK_DEV_INTERFACE_H
/*! @file ExtBlkDevInterface.h
@brief Interface provided by extended block device plugins
  Block devices with vendor-specific capabilities rely on plugins implementing
**ExtBlkDevInterface** to provide access to their capabilities.
Methods returning an **int** return **0** on success and a
negative value on error.
*/
#include <string>
#include <map>
#include <ostream>
#include <memory>
#ifdef __linux__
#include <sys/capability.h>
#else
typedef void *cap_t;
#endif
#include "common/PluginRegistry.h"
namespace ceph {
class ExtBlkDevState {
uint64_t logical_total=0;
uint64_t logical_avail=0;
uint64_t physical_total=0;
uint64_t physical_avail=0;
public:
uint64_t get_logical_total(){return logical_total;}
uint64_t get_logical_avail(){return logical_avail;}
uint64_t get_physical_total(){return physical_total;}
uint64_t get_physical_avail(){return physical_avail;}
void set_logical_total(uint64_t alogical_total){logical_total=alogical_total;}
void set_logical_avail(uint64_t alogical_avail){logical_avail=alogical_avail;}
void set_physical_total(uint64_t aphysical_total){physical_total=aphysical_total;}
void set_physical_avail(uint64_t aphysical_avail){physical_avail=aphysical_avail;}
};
class ExtBlkDevInterface {
public:
virtual ~ExtBlkDevInterface() {}
/**
* Initialize the instance if device logdevname is supported
*
* Return 0 on success or a negative errno on error
*
* @param [in] logdevname name of device to check for support by this plugin
* @return 0 on success or a negative errno on error.
*/
virtual int init(const std::string& logdevname) = 0;
/**
   * Return the name of the underlying device detected by the **init** method
*
* @return the name of the underlying device
*/
virtual const std::string& get_devname() const = 0;
/**
* Provide status of underlying physical storage after compression
*
* Return 0 on success or a negative errno on error.
*
   * @param [out] state current state of the underlying device
* @return 0 on success or a negative errno on error.
*/
virtual int get_state(ExtBlkDevState& state) = 0;
/**
   * Populate the property map with metadata of the device.
*
* @param [in] prefix prefix to be prepended to all map values by this method
* @param [in,out] pm property map of the device, to be extended by attributes detected by this plugin
* @return 0 on success or a negative errno on error.
*/
virtual int collect_metadata(const std::string& prefix, std::map<std::string,std::string> *pm) = 0;
};
typedef std::shared_ptr<ExtBlkDevInterface> ExtBlkDevInterfaceRef;
class ExtBlkDevPlugin : public Plugin {
public:
explicit ExtBlkDevPlugin(CephContext *cct) : Plugin(cct) {}
virtual ~ExtBlkDevPlugin() {}
/**
* Indicate plugin-required capabilities in permitted set
* If a plugin requires a capability to be active in the
* permitted set when invoked, it must indicate so by setting
* the required flags in the cap_t structure passed into this method.
* The cap_t structure is empty when passed into the method, and only the
* method's modifications to the permitted set are used by ceph.
* The plugin must elevate the capabilities into the effective
* set at a later point when needed during the invocation of its
   * other methods, and is responsible for restoring the effective set
   * before returning from the method.
*
* @param [out] caps capability set indicating the necessary capabilities
*/
virtual int get_required_cap_set(cap_t caps) = 0;
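  /**
   * A minimal implementation sketch (illustrative only; the choice of
   * CAP_SYS_ADMIN and the surrounding helper code are assumptions, not
   * requirements of this interface). On Linux a plugin could indicate
   * its requirement with libcap:
   *
   *   int get_required_cap_set(cap_t caps) override {
   *     cap_value_t needed[] = { CAP_SYS_ADMIN };
   *     if (cap_set_flag(caps, CAP_PERMITTED, 1, needed, CAP_SET) < 0)
   *       return -errno;
   *     return 0;
   *   }
   *
   * and later, around a privileged call inside one of its other
   * methods, raise and then drop the capability in the effective set:
   *
   *   cap_t proc = cap_get_proc();
   *   cap_set_flag(proc, CAP_EFFECTIVE, 1, needed, CAP_SET);
   *   cap_set_proc(proc);   // elevate
   *   // ... privileged work ...
   *   cap_set_flag(proc, CAP_EFFECTIVE, 1, needed, CAP_CLEAR);
   *   cap_set_proc(proc);   // restore
   *   cap_free(proc);
   */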
/**
* Factory method, creating ExtBlkDev instances
*
   * @param [in] logdevname name of the logical device, which may be composed of physical devices
* @param [out] ext_blk_dev object created on successful device support detection
* @return 0 on success or a negative errno on error.
*/
virtual int factory(const std::string& logdevname,
ExtBlkDevInterfaceRef& ext_blk_dev) = 0;
};
}
#endif
| 4,914 | 33.612676 | 106 | h |
null | ceph-main/src/extblkdev/ExtBlkDevPlugin.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* (C) Copyright IBM Corporation 2022
* Author: Martin Ohmacht <[email protected]>
*
* Based on the file ceph/src/erasure-code/ErasureCodePlugin.h
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_EXT_BLK_DEV_PLUGIN_H
#define CEPH_EXT_BLK_DEV_PLUGIN_H
#include "ExtBlkDevInterface.h"
namespace ceph {
namespace extblkdev {
int preload(CephContext *cct);
int detect_device(CephContext *cct,
const std::string &logdevname,
ExtBlkDevInterfaceRef& ebd_impl);
int release_device(ExtBlkDevInterfaceRef& ebd_impl);
}
}
#endif
| 1,100 | 27.230769 | 71 | h |
null | ceph-main/src/extblkdev/vdo/ExtBlkDevPluginVdo.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* (C) Copyright IBM Corporation 2022
* Author: Martin Ohmacht <[email protected]>
*
* Based on the file src/erasure-code/clay/ErasureCodePluginClay.h
* Copyright (C) 2018 Indian Institute of Science <[email protected]>
*
* Author: Myna Vajha <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_EXT_BLK_DEV_PLUGIN_VDO_H
#define CEPH_EXT_BLK_DEV_PLUGIN_VDO_H
#include "ExtBlkDevVdo.h"
class ExtBlkDevPluginVdo : public ceph::ExtBlkDevPlugin {
public:
explicit ExtBlkDevPluginVdo(CephContext *cct) : ExtBlkDevPlugin(cct) {}
int get_required_cap_set(cap_t caps) override;
int factory(const std::string& logdevname,
ceph::ExtBlkDevInterfaceRef& ext_blk_dev) override;
};
#endif
| 1,103 | 30.542857 | 73 | h |
null | ceph-main/src/extblkdev/vdo/ExtBlkDevVdo.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* (C) Copyright IBM Corporation 2022
* Author: Martin Ohmacht <[email protected]>
*
* Based on the file ceph/src/common/blkdev.cc
* Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
*
* And also based on the file src/erasure-code/clay/ErasureCodeClay.h
* Copyright (C) 2018 Indian Institute of Science <[email protected]>
*
* Author: Myna Vajha <[email protected]>
*
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_EXT_BLK_DEV_VDO_H
#define CEPH_EXT_BLK_DEV_VDO_H
#include "extblkdev/ExtBlkDevInterface.h"
#include "include/compat.h"
class ExtBlkDevVdo final : public ceph::ExtBlkDevInterface
{
int vdo_dir_fd = -1; ///< fd for vdo sysfs directory
std::string name; // name of the underlying vdo device
std::string logdevname; // name of the top level logical device
CephContext *cct;
public:
explicit ExtBlkDevVdo(CephContext *cct) : cct(cct) {}
~ExtBlkDevVdo(){
if(vdo_dir_fd >= 0)
VOID_TEMP_FAILURE_RETRY(::close(vdo_dir_fd));
}
int _get_vdo_stats_handle(const std::string& devname);
int get_vdo_stats_handle();
int64_t get_vdo_stat(const char *property);
virtual int init(const std::string& logdevname);
virtual const std::string& get_devname() const {return name;}
virtual int get_state(ceph::ExtBlkDevState& state);
virtual int collect_metadata(const std::string& prefix, std::map<std::string,std::string> *pm);
};
#endif
| 1,741 | 31.867925 | 97 | h |
null | ceph-main/src/global/global_context.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_GLOBAL_CONTEXT_H
#define CEPH_GLOBAL_CONTEXT_H
#include <limits.h>
#include "common/config_fwd.h"
#include "include/common_fwd.h"
namespace TOPNSPC::global {
extern CephContext *g_ceph_context;
ConfigProxy& g_conf();
extern const char *g_assert_file;
extern int g_assert_line;
extern const char *g_assert_func;
extern const char *g_assert_condition;
extern unsigned long long g_assert_thread;
extern char g_assert_thread_name[4096];
extern char g_assert_msg[8096];
extern char g_process_name[NAME_MAX + 1];
extern bool g_eio;
extern char g_eio_devname[1024];
extern char g_eio_path[PATH_MAX];
extern int g_eio_error;
extern int g_eio_iotype;   // IOCB_CMD_* from libaio's aio_abi.h
extern unsigned long long g_eio_offset;
extern unsigned long long g_eio_length;
extern int note_io_error_event(
const char *devname,
const char *path,
int error,
int iotype,
unsigned long long offset,
unsigned long long length);
}
using namespace TOPNSPC::global;
#endif
| 1,395 | 24.381818 | 70 | h |
null | ceph-main/src/global/global_init.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_GLOBAL_INIT_H
#define CEPH_COMMON_GLOBAL_INIT_H
#include <stdint.h>
#include <vector>
#include <map>
#include <boost/intrusive_ptr.hpp>
#include "include/ceph_assert.h"
#include "common/ceph_context.h"
#include "common/code_environment.h"
#include "common/common_init.h"
/*
* global_init is the first initialization function that
* daemons and utility programs need to call. It takes care of a lot of
* initialization, including setting up g_ceph_context.
*/
boost::intrusive_ptr<CephContext>
global_init(
const std::map<std::string,std::string> *defaults,
std::vector < const char* >& args,
uint32_t module_type,
code_environment_t code_env,
int flags, bool run_pre_init = true);
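/*
 * A minimal startup sketch (illustrative only; the entity type, flags
 * and option handling are examples, not requirements):
 *
 *   std::vector<const char*> args(argv + 1, argv + argc);
 *   auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_OSD,
 *                          CODE_ENVIRONMENT_DAEMON, 0);
 *   // ... handle daemon specific options ...
 *   global_init_daemonize(g_ceph_context);
 *   common_init_finish(g_ceph_context);
 */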
// just the first half; enough to get config parsed but doesn't start up the
// cct or log.
void global_pre_init(const std::map<std::string,std::string> *defaults,
std::vector < const char* >& args,
uint32_t module_type, code_environment_t code_env,
int flags);
/*
* perform all of the steps that global_init_daemonize performs just prior
* to actually forking (via daemon(3)). return 0 if we are going to proceed
* with the fork, or -1 otherwise.
*/
int global_init_prefork(CephContext *cct);
/*
* perform all the steps that global_init_daemonize performs just after
* the fork, except closing stderr, which we'll do later on.
*/
void global_init_postfork_start(CephContext *cct);
/*
* close stderr, thus completing the postfork.
*/
void global_init_postfork_finish(CephContext *cct);
/*
* global_init_daemonize handles daemonizing a process.
*
* If this is called, it *must* be called before common_init_finish.
* Note that this is equivalent to calling _prefork(), daemon(), and
* _postfork.
*/
void global_init_daemonize(CephContext *cct);
/*
* global_init_chdir changes the process directory.
*
* If this is called, it *must* be called before common_init_finish
*/
void global_init_chdir(const CephContext *cct);
/*
* Explicitly shut down stderr. Usually, you don't need to do
* this, because global_init_daemonize will do it for you. However, in some
* rare cases you need to call this explicitly.
*
* If this is called, it *must* be called before common_init_finish
*/
int global_init_shutdown_stderr(CephContext *cct);
/*
* Preload the erasure coding libraries to detect early issues with
* configuration.
*/
int global_init_preload_erasure_code(const CephContext *cct);
/**
* print daemon startup banner/warning
*/
void global_print_banner(void);
#endif
| 2,952 | 27.95098 | 76 | h |
null | ceph-main/src/global/pidfile.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_PIDFILE_H
#define CEPH_COMMON_PIDFILE_H
#include <string_view>
// Write a pidfile containing the current pid at the given path
// (typically the value of the pid_file configuration option).
[[nodiscard]] int pidfile_write(std::string_view pid_file);
// Remove the pid file that was previously written by pidfile_write.
// This is safe to call in a signal handler context.
void pidfile_remove();
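// Example (illustrative sketch, not part of the original header); pid_path
// is a placeholder for however the caller obtains the configured path.
//
//   if (pidfile_write(pid_path) < 0) {
//     // report the failure; the daemon can still run without a pid file
//   }
//   // ... later, on shutdown or from a signal handler ...
//   pidfile_remove();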
#endif
| 808 | 26.896552 | 71 | h |
null | ceph-main/src/global/signal_handler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_GLOBAL_SIGNAL_HANDLER_H
#define CEPH_GLOBAL_SIGNAL_HANDLER_H
#include <signal.h>
#include "acconfig.h"
#include <map>
#include <string>
typedef void (*signal_handler_t)(int);
namespace ceph {
struct BackTrace;
}
#if defined(HAVE_SIGDESCR_NP)
# define sig_str(signum) sigdescr_np(signum)
#elif defined(HAVE_REENTRANT_STRSIGNAL)
# define sig_str(signum) strsignal(signum)
#else
# define sig_str(signum) sys_siglist[signum]
#endif
void install_sighandler(int signum, signal_handler_t handler, int flags);
// handles SIGHUP
void sighup_handler(int signum);
// Install the standard Ceph signal handlers
void install_standard_sighandlers(void);
/// initialize async signal handler framework
void init_async_signal_handler();
/// shutdown async signal handler framework
void shutdown_async_signal_handler();
/// queue an async signal
void queue_async_signal(int signum);
/// install a safe, async, callback for the given signal
void register_async_signal_handler(int signum, signal_handler_t handler);
void register_async_signal_handler_oneshot(int signum, signal_handler_t handler);
/// uninstall a safe async signal callback
void unregister_async_signal_handler(int signum, signal_handler_t handler);
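// Example (illustrative sketch, not part of the original header): typical
// use of the async signal handler framework around a daemon's main loop.
// handle_shutdown is a placeholder for a caller-provided signal_handler_t.
//
//   init_async_signal_handler();
//   register_async_signal_handler(SIGHUP, sighup_handler);
//   register_async_signal_handler_oneshot(SIGINT, handle_shutdown);
//   // ... run the daemon ...
//   unregister_async_signal_handler(SIGHUP, sighup_handler);
//   shutdown_async_signal_handler();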
void generate_crash_dump(char *base,
const ceph::BackTrace& bt,
std::map<std::string,std::string> *extra = 0);
#endif
| 1,758 | 25.651515 | 81 | h |
null | ceph-main/src/include/CompatSet.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMPATSET_H
#define CEPH_COMPATSET_H
#include <iostream>
#include <map>
#include <string>
#include "include/buffer.h"
#include "include/encoding.h"
#include "include/types.h"
#include "common/Formatter.h"
struct CompatSet {
struct Feature {
uint64_t id;
std::string name;
Feature(uint64_t _id, const std::string& _name) : id(_id), name(_name) {}
};
class FeatureSet {
uint64_t mask;
std::map<uint64_t, std::string> names;
public:
friend struct CompatSet;
friend class CephCompatSet_AllSet_Test;
friend class CephCompatSet_other_Test;
friend class CephCompatSet_merge_Test;
friend std::ostream& operator<<(std::ostream& out, const CompatSet::FeatureSet& fs);
friend std::ostream& operator<<(std::ostream& out, const CompatSet& compat);
FeatureSet() : mask(1), names() {}
void insert(const Feature& f) {
ceph_assert(f.id > 0);
ceph_assert(f.id < 64);
mask |= ((uint64_t)1<<f.id);
names[f.id] = f.name;
}
bool contains(const Feature& f) const {
return names.count(f.id);
}
bool contains(uint64_t f) const {
return names.count(f);
}
/**
* Getter instead of using name[] to be const safe
*/
std::string get_name(uint64_t const f) const {
std::map<uint64_t, std::string>::const_iterator i = names.find(f);
ceph_assert(i != names.end());
return i->second;
}
void remove(uint64_t f) {
if (names.count(f)) {
names.erase(f);
mask &= ~((uint64_t)1<<f);
}
}
void remove(const Feature& f) {
remove(f.id);
}
void encode(ceph::buffer::list& bl) const {
using ceph::encode;
/* See below, mask always has the lowest bit set in memory, but
* unset in the encoding */
encode(mask & (~(uint64_t)1), bl);
encode(names, bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
using ceph::decode;
decode(mask, bl);
decode(names, bl);
/**
* Previously, there was a bug where insert did
* mask |= f.id rather than mask |= (1 << f.id).
* In FeatureSets from those version, mask always
* has the lowest bit set. Since then, masks always
* have the lowest bit unset.
*
* When we encounter such a FeatureSet, we have to
* reconstruct the mask from the names map.
*/
if (mask & 1) {
mask = 1;
std::map<uint64_t, std::string> temp_names;
temp_names.swap(names);
for (auto i = temp_names.begin(); i != temp_names.end(); ++i) {
insert(Feature(i->first, i->second));
}
} else {
mask |= 1;
}
}
void dump(ceph::Formatter *f) const {
for (auto p = names.cbegin(); p != names.cend(); ++p) {
char s[18];
snprintf(s, sizeof(s), "feature_%llu", (unsigned long long)p->first);
f->dump_string(s, p->second);
}
}
};
// These features have no impact on the read / write status
FeatureSet compat;
// If any of these features are missing, read is possible ( as long
// as no incompat feature is missing ) but it is not possible to write
FeatureSet ro_compat;
// If any of these features are missing, read or write is not possible
FeatureSet incompat;
CompatSet(FeatureSet& _compat, FeatureSet& _ro_compat, FeatureSet& _incompat) :
compat(_compat), ro_compat(_ro_compat), incompat(_incompat) {}
CompatSet() : compat(), ro_compat(), incompat() { }
/* does this filesystem implementation have the
features required to read the other? */
bool readable(CompatSet const& other) const {
return !((other.incompat.mask ^ incompat.mask) & other.incompat.mask);
}
/* does this filesystem implementation have the
features required to write the other? */
bool writeable(CompatSet const& other) const {
return readable(other) &&
!((other.ro_compat.mask ^ ro_compat.mask) & other.ro_compat.mask);
}
/* Compare this CompatSet to another.
* CAREFULLY NOTE: This operation is NOT commutative.
* a > b DOES NOT imply that b < a.
* If returns:
* 0: The CompatSets have the same feature set.
* 1: This CompatSet's features are a strict superset of the other's.
* -1: This CompatSet is missing at least one feature
* described in the other. It may still have more features, though.
*/
int compare(const CompatSet& other) const {
if ((other.compat.mask == compat.mask) &&
(other.ro_compat.mask == ro_compat.mask) &&
(other.incompat.mask == incompat.mask)) return 0;
//okay, they're not the same
//if we're writeable we have a superset of theirs on incompat and ro_compat
if (writeable(other) && !((other.compat.mask ^ compat.mask)
& other.compat.mask)) return 1;
    //if we make it here, we weren't writeable or had a different compat set
return -1;
}
/* Get the features supported by other CompatSet but not this one,
* as a CompatSet.
*/
CompatSet unsupported(const CompatSet& other) const {
CompatSet diff;
uint64_t other_compat =
((other.compat.mask ^ compat.mask) & other.compat.mask);
uint64_t other_ro_compat =
((other.ro_compat.mask ^ ro_compat.mask) & other.ro_compat.mask);
uint64_t other_incompat =
((other.incompat.mask ^ incompat.mask) & other.incompat.mask);
for (int id = 1; id < 64; ++id) {
uint64_t mask = (uint64_t)1 << id;
if (mask & other_compat) {
diff.compat.insert( Feature(id, other.compat.names.at(id)));
}
if (mask & other_ro_compat) {
diff.ro_compat.insert(Feature(id, other.ro_compat.names.at(id)));
}
if (mask & other_incompat) {
diff.incompat.insert( Feature(id, other.incompat.names.at(id)));
}
}
return diff;
}
/* Merge features supported by other CompatSet into this one.
* Return: true if some features were merged
*/
bool merge(CompatSet const & other) {
uint64_t other_compat =
((other.compat.mask ^ compat.mask) & other.compat.mask);
uint64_t other_ro_compat =
((other.ro_compat.mask ^ ro_compat.mask) & other.ro_compat.mask);
uint64_t other_incompat =
((other.incompat.mask ^ incompat.mask) & other.incompat.mask);
if (!other_compat && !other_ro_compat && !other_incompat)
return false;
for (int id = 1; id < 64; ++id) {
uint64_t mask = (uint64_t)1 << id;
if (mask & other_compat) {
compat.insert( Feature(id, other.compat.get_name(id)));
}
if (mask & other_ro_compat) {
ro_compat.insert(Feature(id, other.ro_compat.get_name(id)));
}
if (mask & other_incompat) {
incompat.insert( Feature(id, other.incompat.get_name(id)));
}
}
return true;
}
std::ostream& printlite(std::ostream& o) const {
o << "{c=[" << std::hex << compat.mask << "]";
o << ",r=[" << std::hex << ro_compat.mask << "]";
o << ",i=[" << std::hex << incompat.mask << "]}";
o << std::dec;
return o;
}
void encode(ceph::buffer::list& bl) const {
compat.encode(bl);
ro_compat.encode(bl);
incompat.encode(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
compat.decode(bl);
ro_compat.decode(bl);
incompat.decode(bl);
}
void dump(ceph::Formatter *f) const {
f->open_object_section("compat");
compat.dump(f);
f->close_section();
f->open_object_section("ro_compat");
ro_compat.dump(f);
f->close_section();
f->open_object_section("incompat");
incompat.dump(f);
f->close_section();
}
static void generate_test_instances(std::list<CompatSet*>& o) {
o.push_back(new CompatSet);
o.push_back(new CompatSet);
o.back()->compat.insert(Feature(1, "one"));
o.back()->compat.insert(Feature(2, "two"));
o.back()->ro_compat.insert(Feature(4, "four"));
o.back()->incompat.insert(Feature(3, "three"));
}
};
WRITE_CLASS_ENCODER(CompatSet)
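/* Example (illustrative sketch, not part of the original header) of the
 * readable()/writeable()/compare() semantics described above.  Note again
 * that compare() is not commutative.
 *
 *   CompatSet ours, theirs;
 *   ours.incompat.insert(CompatSet::Feature(1, "base"));
 *   theirs.incompat.insert(CompatSet::Feature(1, "base"));
 *   theirs.incompat.insert(CompatSet::Feature(2, "newthing"));
 *
 *   ours.readable(theirs);    // false: we lack incompat feature 2
 *   theirs.readable(ours);    // true
 *   theirs.compare(ours);     // 1: strict superset of ours
 *   ours.compare(theirs);     // -1: missing at least one feature
 *   CompatSet diff = ours.unsupported(theirs);  // diff.incompat holds feature 2
 */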
inline std::ostream& operator<<(std::ostream& out, const CompatSet::Feature& f)
{
return out << "F(" << f.id << ", \"" << f.name << "\")";
}
inline std::ostream& operator<<(std::ostream& out, const CompatSet::FeatureSet& fs)
{
return out << fs.names;
}
inline std::ostream& operator<<(std::ostream& out, const CompatSet& compat)
{
return out << "compat=" << compat.compat
<< ",rocompat=" << compat.ro_compat
<< ",incompat=" << compat.incompat;
}
#endif
| 8,745 | 29.58042 | 88 | h |
null | ceph-main/src/include/Context.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CONTEXT_H
#define CEPH_CONTEXT_H
#include "common/dout.h"
#include <functional>
#include <list>
#include <memory>
#include <set>
#include <boost/function.hpp>
#include <boost/system/error_code.hpp>
#include "common/error_code.h"
#include "include/ceph_assert.h"
#include "common/ceph_mutex.h"
#define mydout(cct, v) lgeneric_subdout(cct, context, v)
/*
* GenContext - abstract callback class
*/
template <typename T>
class GenContext {
GenContext(const GenContext& other);
const GenContext& operator=(const GenContext& other);
protected:
virtual void finish(T t) = 0;
public:
GenContext() {}
virtual ~GenContext() {} // we want a virtual destructor!!!
template <typename C>
void complete(C &&t) {
finish(std::forward<C>(t));
delete this;
}
template <typename C>
void operator()(C &&t) noexcept {
complete(std::forward<C>(t));
}
template<typename U = T>
auto operator()() noexcept
-> typename std::enable_if<std::is_default_constructible<U>::value,
void>::type {
complete(T{});
}
std::reference_wrapper<GenContext> func() {
return std::ref(*this);
}
};
template <typename T>
using GenContextURef = std::unique_ptr<GenContext<T> >;
/*
* Context - abstract callback class
*/
class Finisher;
class Context {
Context(const Context& other);
const Context& operator=(const Context& other);
protected:
virtual void finish(int r) = 0;
// variant of finish that is safe to call "synchronously." override should
// return true.
virtual bool sync_finish(int r) {
return false;
}
public:
Context() {}
virtual ~Context() {} // we want a virtual destructor!!!
virtual void complete(int r) {
finish(r);
delete this;
}
virtual bool sync_complete(int r) {
if (sync_finish(r)) {
delete this;
return true;
}
return false;
}
void complete(boost::system::error_code ec) {
complete(ceph::from_error_code(ec));
}
void operator()(boost::system::error_code ec) noexcept {
complete(ec);
}
void operator()() noexcept {
complete({});
}
std::reference_wrapper<Context> func() {
return std::ref(*this);
}
};
/**
* Simple context holding a single object
*/
template<class T>
class ContainerContext : public Context {
T obj;
public:
ContainerContext(T &obj) : obj(obj) {}
void finish(int r) override {}
};
template <typename T>
ContainerContext<T> *make_container_context(T &&t) {
return new ContainerContext<T>(std::forward<T>(t));
}
template <class T>
struct Wrapper : public Context {
Context *to_run;
T val;
Wrapper(Context *to_run, T val) : to_run(to_run), val(val) {}
void finish(int r) override {
if (to_run)
to_run->complete(r);
}
};
struct RunOnDelete {
Context *to_run;
RunOnDelete(Context *to_run) : to_run(to_run) {}
~RunOnDelete() {
if (to_run)
to_run->complete(0);
}
};
typedef std::shared_ptr<RunOnDelete> RunOnDeleteRef;
template <typename T>
class LambdaContext : public Context {
public:
LambdaContext(T &&t) : t(std::forward<T>(t)) {}
void finish(int r) override {
if constexpr (std::is_invocable_v<T, int>)
t(r);
else
t();
}
private:
T t;
};
template <typename T>
LambdaContext<T> *make_lambda_context(T &&t) {
return new LambdaContext<T>(std::move(t));
}
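/*
 * Example (illustrative sketch, not part of the original header): wrapping
 * a lambda as a Context.  complete() runs finish() and then deletes the
 * context, so the pointer must not be used afterwards.
 *
 *   Context *on_done = make_lambda_context([](int r) {
 *     // react to the completion code r
 *   });
 *   on_done->complete(0);  // invokes the lambda with r == 0, then deletes itself
 */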
template <typename F, typename T>
struct LambdaGenContext : GenContext<T> {
F f;
LambdaGenContext(F &&f) : f(std::forward<F>(f)) {}
void finish(T t) override {
f(std::forward<T>(t));
}
};
template <typename T, typename F>
GenContextURef<T> make_gen_lambda_context(F &&f) {
return GenContextURef<T>(new LambdaGenContext<F, T>(std::move(f)));
}
/*
* finish and destroy a list of Contexts
*/
template<class C>
inline void finish_contexts(CephContext *cct, C& finished, int result = 0)
{
if (finished.empty())
return;
C ls;
ls.swap(finished); // swap out of place to avoid weird loops
if (cct)
mydout(cct,10) << ls.size() << " contexts to finish with " << result << dendl;
for (Context* c : ls) {
if (cct)
mydout(cct,10) << "---- " << c << dendl;
c->complete(result);
}
}
class C_NoopContext : public Context {
public:
void finish(int r) override { }
};
struct C_Lock : public Context {
ceph::mutex *lock;
Context *fin;
C_Lock(ceph::mutex *l, Context *c) : lock(l), fin(c) {}
~C_Lock() override {
delete fin;
}
void finish(int r) override {
if (fin) {
std::lock_guard l{*lock};
fin->complete(r);
fin = NULL;
}
}
};
/*
* C_Contexts - set of Contexts
*
* ContextType must be an ancestor class of ContextInstanceType, or the same class.
* ContextInstanceType must be default-constructable.
*/
template <class ContextType, class ContextInstanceType, class Container = std::list<ContextType *>>
class C_ContextsBase : public ContextInstanceType {
public:
CephContext *cct;
Container contexts;
C_ContextsBase(CephContext *cct_)
: cct(cct_)
{
}
~C_ContextsBase() override {
for (auto c : contexts) {
delete c;
}
}
void add(ContextType* c) {
contexts.push_back(c);
}
void take(Container& ls) {
Container c;
c.swap(ls);
if constexpr (std::is_same_v<Container, std::list<ContextType *>>) {
contexts.splice(contexts.end(), c);
} else {
contexts.insert(contexts.end(), c.begin(), c.end());
}
}
void complete(int r) override {
// Neuter any ContextInstanceType custom complete(), because although
// I want to look like it, I don't actually want to run its code.
Context::complete(r);
}
void finish(int r) override {
finish_contexts(cct, contexts, r);
}
bool empty() { return contexts.empty(); }
template<class C>
static ContextType *list_to_context(C& cs) {
if (cs.size() == 0) {
return 0;
} else if (cs.size() == 1) {
ContextType *c = cs.front();
cs.clear();
return c;
} else {
C_ContextsBase<ContextType, ContextInstanceType> *c(new C_ContextsBase<ContextType, ContextInstanceType>(0));
c->take(cs);
return c;
}
}
};
typedef C_ContextsBase<Context, Context> C_Contexts;
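/*
 * Example (illustrative sketch, not part of the original header): gathering
 * several callbacks into one C_Contexts and finishing them all at once.
 * The CephContext argument is only used for debug logging and may be null.
 *
 *   C_Contexts *all = new C_Contexts(nullptr);
 *   all->add(make_lambda_context([](int r) { handle_first(r); }));
 *   all->add(make_lambda_context([](int r) { handle_second(r); }));
 *   all->complete(0);  // completes both callbacks with r == 0, then deletes itself
 *
 * handle_first/handle_second stand in for whatever the caller wants to run.
 */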
/*
* C_Gather
*
* ContextType must be an ancestor class of ContextInstanceType, or the same class.
* ContextInstanceType must be default-constructable.
*
* BUG:? only reports error from last sub to have an error return
*/
template <class ContextType, class ContextInstanceType>
class C_GatherBase {
private:
CephContext *cct;
int result = 0;
ContextType *onfinish;
#ifdef DEBUG_GATHER
std::set<ContextType*> waitfor;
#endif
int sub_created_count = 0;
int sub_existing_count = 0;
mutable ceph::recursive_mutex lock =
ceph::make_recursive_mutex("C_GatherBase::lock"); // disable lockdep
bool activated = false;
void sub_finish(ContextType* sub, int r) {
lock.lock();
#ifdef DEBUG_GATHER
ceph_assert(waitfor.count(sub));
waitfor.erase(sub);
#endif
--sub_existing_count;
mydout(cct,10) << "C_GatherBase " << this << ".sub_finish(r=" << r << ") " << sub
#ifdef DEBUG_GATHER
<< " (remaining " << waitfor << ")"
#endif
<< dendl;
if (r < 0 && result == 0)
result = r;
if ((activated == false) || (sub_existing_count != 0)) {
lock.unlock();
return;
}
lock.unlock();
delete_me();
}
void delete_me() {
if (onfinish) {
onfinish->complete(result);
onfinish = 0;
}
delete this;
}
class C_GatherSub : public ContextInstanceType {
C_GatherBase *gather;
public:
C_GatherSub(C_GatherBase *g) : gather(g) {}
void complete(int r) override {
// Cancel any customized complete() functionality
// from the Context subclass we're templated for,
// we only want to hit that in onfinish, not at each
// sub finish. e.g. MDSInternalContext.
Context::complete(r);
}
void finish(int r) override {
gather->sub_finish(this, r);
gather = 0;
}
~C_GatherSub() override {
if (gather)
gather->sub_finish(this, 0);
}
};
public:
C_GatherBase(CephContext *cct_, ContextType *onfinish_)
: cct(cct_), onfinish(onfinish_)
{
mydout(cct,10) << "C_GatherBase " << this << ".new" << dendl;
}
~C_GatherBase() {
mydout(cct,10) << "C_GatherBase " << this << ".delete" << dendl;
}
void set_finisher(ContextType *onfinish_) {
std::lock_guard l{lock};
ceph_assert(!onfinish);
onfinish = onfinish_;
}
void activate() {
lock.lock();
ceph_assert(activated == false);
activated = true;
if (sub_existing_count != 0) {
lock.unlock();
return;
}
lock.unlock();
delete_me();
}
ContextType *new_sub() {
std::lock_guard l{lock};
ceph_assert(activated == false);
sub_created_count++;
sub_existing_count++;
ContextType *s = new C_GatherSub(this);
#ifdef DEBUG_GATHER
waitfor.insert(s);
#endif
mydout(cct,10) << "C_GatherBase " << this << ".new_sub is " << sub_created_count << " " << s << dendl;
return s;
}
inline int get_sub_existing_count() const {
std::lock_guard l{lock};
return sub_existing_count;
}
inline int get_sub_created_count() const {
std::lock_guard l{lock};
return sub_created_count;
}
};
/*
* The C_GatherBuilder remembers each C_Context created by
* C_GatherBuilder.new_sub() in a C_Gather. When a C_Context created
* by new_sub() is complete(), C_Gather forgets about it. When
* C_GatherBuilder notices that there are no C_Context left in
* C_Gather, it calls complete() on the C_Context provided as the
* second argument of the constructor (finisher).
*
* How to use C_GatherBuilder:
*
* 1. Create a C_GatherBuilder on the stack
* 2. Call gather_bld.new_sub() as many times as you want to create new subs
* It is safe to call this 0 times, or 100, or anything in between.
* 3. If you didn't supply a finisher in the C_GatherBuilder constructor,
* set one with gather_bld.set_finisher(my_finisher)
* 4. Call gather_bld.activate()
*
* Example:
*
* C_SaferCond all_done;
* C_GatherBuilder gb(g_ceph_context, all_done);
* j.submit_entry(1, first, 0, gb.new_sub()); // add a C_Context to C_Gather
* j.submit_entry(2, first, 0, gb.new_sub()); // add a C_Context to C_Gather
* gb.activate(); // consume C_Context as soon as they complete()
* all_done.wait(); // all_done is complete() after all new_sub() are complete()
*
* The finisher may be called at any point after step 4, including immediately
* from the activate() function.
* The finisher will never be called before activate().
*
* Note: Currently, subs must be manually freed by the caller (for some reason.)
*/
template <class ContextType, class GatherType>
class C_GatherBuilderBase
{
public:
C_GatherBuilderBase(CephContext *cct_)
: cct(cct_), c_gather(NULL), finisher(NULL), activated(false)
{
}
C_GatherBuilderBase(CephContext *cct_, ContextType *finisher_)
: cct(cct_), c_gather(NULL), finisher(finisher_), activated(false)
{
}
~C_GatherBuilderBase() {
if (c_gather) {
ceph_assert(activated); // Don't forget to activate your C_Gather!
}
else {
delete finisher;
}
}
ContextType *new_sub() {
if (!c_gather) {
c_gather = new GatherType(cct, finisher);
}
return c_gather->new_sub();
}
void activate() {
if (!c_gather)
return;
ceph_assert(finisher != NULL);
activated = true;
c_gather->activate();
}
void set_finisher(ContextType *finisher_) {
finisher = finisher_;
if (c_gather)
c_gather->set_finisher(finisher);
}
GatherType *get() const {
return c_gather;
}
bool has_subs() const {
return (c_gather != NULL);
}
int num_subs_created() {
ceph_assert(!activated);
if (c_gather == NULL)
return 0;
return c_gather->get_sub_created_count();
}
int num_subs_remaining() {
ceph_assert(!activated);
if (c_gather == NULL)
return 0;
return c_gather->get_sub_existing_count();
}
private:
CephContext *cct;
GatherType *c_gather;
ContextType *finisher;
bool activated;
};
typedef C_GatherBase<Context, Context> C_Gather;
typedef C_GatherBuilderBase<Context, C_Gather > C_GatherBuilder;
template <class ContextType>
class ContextFactory {
public:
virtual ~ContextFactory() {}
virtual ContextType *build() = 0;
};
inline auto lambdafy(Context *c) {
return [fin = std::unique_ptr<Context>(c)]
(boost::system::error_code ec) mutable {
fin.release()->complete(ceph::from_error_code(ec));
};
}
#undef mydout
#endif
| 13,026 | 23.304104 | 115 | h |
null | ceph-main/src/include/alloc_ptr.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_ALLOC_PTR_H
#define CEPH_ALLOC_PTR_H
#include <memory>
template <class T>
class alloc_ptr
{
public:
typedef typename std::pointer_traits< std::unique_ptr<T> >::pointer pointer;
typedef typename std::pointer_traits< std::unique_ptr<T> >::element_type element_type;
alloc_ptr() : ptr() {}
template<class U>
alloc_ptr(U&& u) : ptr(std::forward<U>(u)) {}
  alloc_ptr(alloc_ptr&& rhs) : ptr(std::move(rhs.ptr)) {}
  alloc_ptr(const alloc_ptr& rhs) = delete;
  alloc_ptr& operator=(alloc_ptr&& rhs) {
    ptr = std::move(rhs.ptr);
    return *this;
  }
  // the underlying storage is a unique_ptr, so copy assignment is not allowed
  alloc_ptr& operator=(const alloc_ptr& rhs) = delete;
  void swap(alloc_ptr& rhs) {
    ptr.swap(rhs.ptr);
  }
element_type* release() {
return ptr.release();
}
void reset(element_type *p = nullptr) {
ptr.reset(p);
}
element_type* get() const {
if (!ptr)
ptr.reset(new element_type);
return ptr.get();
}
element_type& operator*() const {
if (!ptr)
ptr.reset(new element_type);
return *ptr;
}
element_type* operator->() const {
if (!ptr)
ptr.reset(new element_type);
return ptr.get();
}
operator bool() const {
return !!ptr;
}
  friend bool operator< (const alloc_ptr& lhs, const alloc_ptr& rhs) {
    return std::less<element_type>()(*lhs, *rhs);
  }
  friend bool operator<=(const alloc_ptr& lhs, const alloc_ptr& rhs) {
    return std::less_equal<element_type>()(*lhs, *rhs);
  }
  friend bool operator> (const alloc_ptr& lhs, const alloc_ptr& rhs) {
    return std::greater<element_type>()(*lhs, *rhs);
  }
  friend bool operator>=(const alloc_ptr& lhs, const alloc_ptr& rhs) {
    return std::greater_equal<element_type>()(*lhs, *rhs);
  }
friend bool operator==(const alloc_ptr& lhs, const alloc_ptr& rhs) {
return *lhs == *rhs;
}
friend bool operator!=(const alloc_ptr& lhs, const alloc_ptr& rhs) {
return *lhs != *rhs;
}
private:
mutable std::unique_ptr<element_type> ptr;
};
#endif
| 2,562 | 26.858696 | 90 | h |
null | ceph-main/src/include/any.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef INCLUDE_STATIC_ANY
#define INCLUDE_STATIC_ANY
#include <any>
#include <cstddef>
#include <initializer_list>
#include <memory>
#include <typeinfo>
#include <type_traits>
#include <boost/smart_ptr/shared_ptr.hpp>
#include <boost/smart_ptr/make_shared.hpp>
namespace ceph {
namespace _any {
// Shared Functionality
// --------------------
//
// Common implementation details. Most functionality is here. We
// assume that destructors do not throw. Some of them might and
// they'll invoke terminate and that's fine.
//
// We are using the Curiously Recurring Template Pattern! We require
// that all classes inheriting from us provide:
//
// - `static constexpr size_t capacity`: Maximum capacity. No object
// larger than this may be
// stored. `dynamic` for dynamic.
// - `void* ptr() const noexcept`: returns a pointer to storage.
// (`alloc_storage` must have been called.
// `free_storage` must not have been called
// since.)
// - `void* alloc_storage(const std::size_t)`: allocate storage
// - `void free_storage() noexcept`: free storage. Must be idempotent.
//
// We provide most of the public interface, as well as the operator function,
// cast_helper, and the type() call.
// Set `capacity` to this value to indicate that there is no fixed
// capacity.
//
inline constexpr std::size_t dynamic = ~0;
// Driver Function
// ---------------
//
// The usual type-erasure control function trick. This one is simpler
// than usual since we punt on moving and copying. We could dispense
// with this and just store a deleter and a pointer to a typeinfo, but
// that would be twice the space.
//
// Moved out here so the type of `func_t` isn't dependent on the
// enclosing class.
//
enum class op { type, destroy };
template<typename T>
inline void op_func(const op o, void* p) noexcept {
static const std::type_info& type = typeid(T);
switch (o) {
case op::type:
*(reinterpret_cast<const std::type_info**>(p)) = &type;
break;
case op::destroy:
reinterpret_cast<T*>(p)->~T();
break;
}
}
using func_t = void (*)(const op, void* p) noexcept;
// The base class
// --------------
//
// The `storage_t` parameter gives the type of the value that manages
// storage and allocation. We use it to create a protected data member
// (named `storage`). This allows us to sidestep the problem in
// initialization order where, where exposed constructors were using
// trying to allocate or free storage *before* the data members of the
// derived class were initialized.
//
// Making storage_t a member type of the derived class won't work, due
// to C++'s rules for nested types being *horrible*. Just downright
// *horrible*.
//
template<typename D, typename storage_t>
class base {
// Make definitions from our superclass visible
// --------------------------------------------
//
// And check that they fit the requirements. At least those that are
// statically checkable.
//
static constexpr std::size_t capacity = D::capacity;
void* ptr() const noexcept {
static_assert(
noexcept(static_cast<const D*>(this)->ptr()) &&
std::is_same_v<decltype(static_cast<const D*>(this)->ptr()), void*>,
"‘void* ptr() const noexcept’ missing from superclass");
return static_cast<const D*>(this)->ptr();
}
void* alloc_storage(const std::size_t z) {
static_assert(
std::is_same_v<decltype(static_cast<D*>(this)->alloc_storage(z)), void*>,
"‘void* alloc_storage(const size_t)’ missing from superclass.");
return static_cast<D*>(this)->alloc_storage(z);
}
void free_storage() noexcept {
static_assert(
noexcept(static_cast<D*>(this)->free_storage()) &&
std::is_void_v<decltype(static_cast<D*>(this)->free_storage())>,
"‘void free_storage() noexcept’ missing from superclass.");
static_cast<D*>(this)->free_storage();
}
// Pile O' Templates
// -----------------
//
// These are just verbose and better typed once than twice. They're
// used for SFINAE and declaring noexcept.
//
template<class T>
struct is_in_place_type_helper : std::false_type {};
template<class T>
struct is_in_place_type_helper<std::in_place_type_t<T>> : std::true_type {};
template<class T>
static constexpr bool is_in_place_type_v =
is_in_place_type_helper<std::decay_t<T>>::value;
// SFINAE condition for value initialized
// constructors/assigners. This is analogous to the standard's
// requirement that this overload only participate in overload
// resolution if std::decay_t<T> is not the same type as the
// any-type, nor a specialization of std::in_place_type_t
//
template<typename T>
using value_condition_t = std::enable_if_t<
!std::is_same_v<std::decay_t<T>, D> &&
!is_in_place_type_v<std::decay_t<T>>>;
// This `noexcept` condition for value construction lets
// `immobile_any`'s value constructor/assigner be noexcept, so long
// as the type's copy or move constructor cooperates.
//
template<typename T>
static constexpr bool value_noexcept_v =
std::is_nothrow_constructible_v<std::decay_t<T>, T> && capacity != dynamic;
// SFINAE condition for in-place constructors/assigners
//
template<typename T, typename... Args>
using in_place_condition_t = std::enable_if_t<std::is_constructible_v<
std::decay_t<T>, Args...>>;
// Analogous to the above. Give noexcept to immobile_any::emplace
// when possible.
//
template<typename T, typename... Args>
static constexpr bool in_place_noexcept_v =
std::is_nothrow_constructible_v<std::decay_t<T>, Args...> &&
capacity != dynamic;
private:
// Functionality!
// --------------
// The driver function for the currently stored object. Whether this
// is null is the canonical way to know whether an instance has a
// value.
//
func_t func = nullptr;
// Construct an object within ourselves. As you can see we give the
// weak exception safety guarantee.
//
template<typename T, typename ...Args>
std::decay_t<T>& construct(Args&& ...args) {
using Td = std::decay_t<T>;
static_assert(capacity == dynamic || sizeof(Td) <= capacity,
"Supplied type is too large for this specialization.");
try {
func = &op_func<Td>;
return *new (reinterpret_cast<Td*>(alloc_storage(sizeof(Td))))
Td(std::forward<Args>(args)...);
} catch (...) {
reset();
throw;
}
}
protected:
// We hold the storage, even if the superclass class manipulates it,
// so that its default initialization comes soon enough for us to
// use it in our constructors.
//
storage_t storage;
public:
base() noexcept = default;
~base() noexcept {
reset();
}
protected:
// Since some of our derived classes /can/ be copied or moved.
//
base(const base& rhs) noexcept : func(rhs.func) {
if constexpr (std::is_copy_assignable_v<storage_t>) {
storage = rhs.storage;
}
}
base& operator =(const base& rhs) noexcept {
reset();
func = rhs.func;
if constexpr (std::is_copy_assignable_v<storage_t>) {
storage = rhs.storage;
}
return *this;
}
base(base&& rhs) noexcept : func(std::move(rhs.func)) {
if constexpr (std::is_move_assignable_v<storage_t>) {
storage = std::move(rhs.storage);
}
rhs.func = nullptr;
}
base& operator =(base&& rhs) noexcept {
reset();
func = rhs.func;
if constexpr (std::is_move_assignable_v<storage_t>) {
storage = std::move(rhs.storage);
}
rhs.func = nullptr;
return *this;
}
public:
// Value construct/assign
// ----------------------
//
template<typename T,
typename = value_condition_t<T>>
base(T&& t) noexcept(value_noexcept_v<T>) {
construct<T>(std::forward<T>(t));
}
// On exception, *this is set to empty.
//
template<typename T,
typename = value_condition_t<T>>
base& operator =(T&& t) noexcept(value_noexcept_v<T>) {
reset();
construct<T>(std::forward<T>(t));
return *this;
}
// In-place construct/assign
// -------------------------
//
// I really hate the way the C++ standard library treats references
// as if they were stepchildren in a Charles Dickens novel. I am
// quite upset that std::optional lacks a specialization for
// references. There's no legitimate reason for it. The whole
// 're-seat or refuse' debate is simply a canard. The optional is
// effectively a container, so of course it can be emptied or
// reassigned. No, pointers are not an acceptable substitute. A
// pointer gives an address in memory which may be null and which
// may represent an object or may a location in which an object is
// to be created. An optional reference, on the other hand, is a
// reference to an initialized, live object or /empty/. This is an
// obvious difference that should be communicable to any programmer
// reading the code through the type system.
//
// `std::any`, even in the case of in-place construction,
// only stores the decayed type. I suspect this was to get around
// the question of whether, for a std::any holding a T&,
// std::any_cast<T> should return a copy or throw
// std::bad_any_cast.
//
// I think the appropriate response in that case would be to make a
// copy if the type supports it and fail otherwise. Once a concrete
// type is known the problem solves itself.
//
// If one were inclined, one could easily load the driver function
// with a heavy subset of the type traits (those that depend only on
// the type in question) and simply /ask/ whether it's a reference.
//
// At the moment, I'm maintaining compatibility with the standard
// library except for copy/move semantics.
//
template<typename T,
typename... Args,
typename = in_place_condition_t<T, Args...>>
base(std::in_place_type_t<T>,
Args&& ...args) noexcept(in_place_noexcept_v<T, Args...>) {
construct<T>(std::forward<Args>(args)...);
}
// On exception, *this is set to empty.
//
template<typename T,
typename... Args,
typename = in_place_condition_t<T>>
std::decay_t<T>& emplace(Args&& ...args) noexcept(in_place_noexcept_v<
T, Args...>) {
reset();
return construct<T>(std::forward<Args>(args)...);
}
template<typename T,
typename U,
typename... Args,
typename = in_place_condition_t<T, std::initializer_list<U>,
Args...>>
base(std::in_place_type_t<T>,
std::initializer_list<U> i,
Args&& ...args) noexcept(in_place_noexcept_v<T, std::initializer_list<U>,
Args...>) {
construct<T>(i, std::forward<Args>(args)...);
}
// On exception, *this is set to empty.
//
template<typename T,
typename U,
typename... Args,
typename = in_place_condition_t<T, std::initializer_list<U>,
Args...>>
std::decay_t<T>& emplace(std::initializer_list<U> i,
Args&& ...args) noexcept(in_place_noexcept_v<T,
std::initializer_list<U>,
Args...>) {
reset();
return construct<T>(i,std::forward<Args>(args)...);
}
// Empty ourselves, using the subclass to free any storage.
//
void reset() noexcept {
if (has_value()) {
func(op::destroy, ptr());
func = nullptr;
}
free_storage();
}
template<typename U = storage_t,
typename = std::enable_if<std::is_swappable_v<storage_t>>>
void swap(base& rhs) {
using std::swap;
swap(func, rhs.func);
swap(storage, rhs.storage);
}
// All other functions should use this function to test emptiness
// rather than examining `func` directly.
//
bool has_value() const noexcept {
return !!func;
}
// Returns the type of the value stored, if any.
//
const std::type_info& type() const noexcept {
if (has_value()) {
const std::type_info* t;
func(op::type, reinterpret_cast<void*>(&t));
return *t;
} else {
return typeid(void);
}
}
template<typename T, typename U, typename V>
friend inline void* cast_helper(const base<U, V>& b) noexcept;
};
// Function used by all `any_cast` functions
//
// Returns a void* to the contents if they exist and match the
// requested type, otherwise `nullptr`.
//
template<typename T, typename U, typename V>
inline void* cast_helper(const base<U, V>& b) noexcept {
if (b.func && ((&op_func<T> == b.func) ||
(b.type() == typeid(T)))) {
return b.ptr();
} else {
return nullptr;
}
}
}
// `any_cast`
// ==========
//
// Just the usual gamut of `any_cast` overloads. These get a bit
// repetitive and it would be nice to think of a way to collapse them
// down a bit.
//
// The pointer pair!
//
template<typename T, typename U, typename V>
inline T* any_cast(_any::base<U, V>* a) noexcept {
if (a) {
return static_cast<T*>(_any::cast_helper<std::decay_t<T>>(*a));
}
return nullptr;
}
template<typename T, typename U, typename V>
inline const T* any_cast(const _any::base<U, V>* a) noexcept {
if (a) {
return static_cast<T*>(_any::cast_helper<std::decay_t<T>>(*a));
}
return nullptr;
}
// While we disallow copying the immobile any itself, we can allow
// anything with an extracted value that the type supports.
//
template<typename T, typename U, typename V>
inline T any_cast(_any::base<U, V>& a) {
static_assert(std::is_reference_v<T> ||
std::is_copy_constructible_v<T>,
"The supplied type must be either a reference or "
"copy constructible.");
auto p = any_cast<std::decay_t<T>>(&a);
if (p) {
return static_cast<T>(*p);
}
throw std::bad_any_cast();
}
template<typename T, typename U, typename V>
inline T any_cast(const _any::base<U, V>& a) {
static_assert(std::is_reference_v<T> ||
std::is_copy_constructible_v<T>,
"The supplied type must be either a reference or "
"copy constructible.");
auto p = any_cast<std::decay_t<T>>(&a);
if (p) {
return static_cast<T>(*p);
}
throw std::bad_any_cast();
}
template<typename T, typename U, typename V>
inline std::enable_if_t<(std::is_move_constructible_v<T> ||
std::is_copy_constructible_v<T>) &&
!std::is_rvalue_reference_v<T>, T>
any_cast(_any::base<U, V>&& a) {
auto p = any_cast<std::decay_t<T>>(&a);
if (p) {
return std::move((*p));
}
throw std::bad_any_cast();
}
template<typename T, typename U, typename V>
inline std::enable_if_t<std::is_rvalue_reference_v<T>, T>
any_cast(_any::base<U, V>&& a) {
auto p = any_cast<std::decay_t<T>>(&a);
if (p) {
return static_cast<T>(*p);
}
throw std::bad_any_cast();
}
// `immobile_any`
// ==============
//
// Sometimes, uncopyable objects exist and I want to do things with
// them. The C++ standard library is really quite keen on insisting
// things be copyable before it deigns to work. I find this annoying.
//
// Also, the allocator, while useful, is really not considerate of
// other people's time. Every time we go to visit it, it takes us
// quite an awfully long time to get away again. As such, I've been
// trying to avoid its company whenever it is convenient and seemly.
//
// We accept any type that will fit in the declared capacity. You may
// store types with throwing destructors, but terminate will be
// invoked when they throw.
//
template<std::size_t S>
class immobile_any : public _any::base<immobile_any<S>,
std::aligned_storage_t<S>> {
using base = _any::base<immobile_any<S>, std::aligned_storage_t<S>>;
friend base;
using _any::base<immobile_any<S>, std::aligned_storage_t<S>>::storage;
// Superclass requirements!
// ------------------------
//
// Simple as anything. We have a buffer of fixed size and return the
// pointer to it when asked.
//
static constexpr std::size_t capacity = S;
void* ptr() const noexcept {
return const_cast<void*>(static_cast<const void*>(&storage));
}
void* alloc_storage(std::size_t) noexcept {
return ptr();
}
void free_storage() noexcept {}
static_assert(capacity != _any::dynamic,
"That is not a valid size for an immobile_any.");
public:
immobile_any() noexcept = default;
immobile_any(const immobile_any&) = delete;
immobile_any& operator =(const immobile_any&) = delete;
immobile_any(immobile_any&&) = delete;
immobile_any& operator =(immobile_any&&) = delete;
using base::base;
using base::operator =;
void swap(immobile_any&) = delete;
};
template<typename T, std::size_t S, typename... Args>
inline immobile_any<S> make_immobile_any(Args&& ...args) {
return immobile_any<S>(std::in_place_type<T>, std::forward<Args>(args)...);
}
template<typename T, std::size_t S, typename U, typename... Args>
inline immobile_any<S> make_immobile_any(std::initializer_list<U> i, Args&& ...args) {
return immobile_any<S>(std::in_place_type<T>, i, std::forward<Args>(args)...);
}
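// Example (illustrative sketch, not part of the original header): an
// immobile_any sized for a std::string, queried with any_cast.  The stored
// type must fit in the declared capacity.
//
//   auto a = make_immobile_any<std::string, sizeof(std::string)>("hello");
//   any_cast<std::string&>(a) += " world";
//   auto* p = any_cast<std::string>(&a);  // nullptr if the stored type differed
//   a.reset();                            // destroys the stored string
//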
// `unique_any`
// ============
//
// Oh dear. Now we're getting back into allocation. You don't think
// the allocator noticed all those mean things we said about it, do
// you?
//
// Well. Okay, allocator. Sometimes when it's the middle of the night
// and you're writing template code you say things you don't exactly
// mean. If it weren't for you, we wouldn't have any memory to run all
// our programs in at all. Really, I'm just being considerate of
// *your* needs, trying to avoid having to run to you every time we
// instantiate a type, making a few that can be self-sufficient…uh…
//
// **Anyway**, this is movable but not copyable, as you should expect
// from anything with ‘unique’ in the name.
//
class unique_any : public _any::base<unique_any, std::unique_ptr<std::byte[]>> {
using base = _any::base<unique_any, std::unique_ptr<std::byte[]>>;
friend base;
using base::storage;
// Superclass requirements
// -----------------------
//
// Our storage is a single chunk of RAM owned by a
// `std::unique_ptr`.
//
static constexpr std::size_t capacity = _any::dynamic;
  void* ptr() const noexcept {
    return static_cast<void*>(storage.get());
  }
void* alloc_storage(const std::size_t z) {
storage.reset(new std::byte[z]);
return ptr();
}
void free_storage() noexcept {
storage.reset();
}
public:
unique_any() noexcept = default;
~unique_any() noexcept = default;
unique_any(const unique_any&) = delete;
unique_any& operator =(const unique_any&) = delete;
// We can rely on the behavior of `unique_ptr` and the base class to
// give us a default move constructor that does the right thing.
//
unique_any(unique_any&& rhs) noexcept = default;
unique_any& operator =(unique_any&& rhs) = default;
using base::base;
using base::operator =;
};
inline void swap(unique_any& lhs, unique_any& rhs) noexcept {
lhs.swap(rhs);
}
template<typename T, typename... Args>
inline unique_any make_unique_any(Args&& ...args) {
return unique_any(std::in_place_type<T>, std::forward<Args>(args)...);
}
template<typename T, typename U, typename... Args>
inline unique_any make_unique_any(std::initializer_list<U> i, Args&& ...args) {
return unique_any(std::in_place_type<T>, i, std::forward<Args>(args)...);
}
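// Example (illustrative sketch, not part of the original header): unique_any
// owns heap storage and is movable but not copyable.
//
//   auto u = make_unique_any<std::vector<int>>(3, 42);
//   any_cast<std::vector<int>&>(u).push_back(7);
//   unique_any v = std::move(u);  // transfers the storage
//   assert(!u.has_value());       // the moved-from any is now empty
//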
// `shared_any`
// ============
//
// Once more with feeling!
//
// This is both copyable *and* movable. In case you need that sort of
// thing. It seemed a reasonable completion.
//
class shared_any : public _any::base<shared_any, boost::shared_ptr<std::byte[]>> {
using base = _any::base<shared_any, boost::shared_ptr<std::byte[]>>;
friend base;
using base::storage;
// Superclass requirements
// -----------------------
//
// Our storage is a single chunk of RAM allocated from the
// heap. This time it's owned by a `boost::shared_ptr` so we can use
// `boost::make_shared_noinit`. (This lets us get the optimization
// that allocates array and control block in one without wasting
// time on `memset`.)
//
static constexpr std::size_t capacity = _any::dynamic;
void* ptr() const noexcept {
return static_cast<void*>(storage.get());
}
void* alloc_storage(std::size_t n) {
storage = boost::make_shared_noinit<std::byte[]>(n);
return ptr();
}
void free_storage() noexcept {
storage.reset();
}
public:
shared_any() noexcept = default;
~shared_any() noexcept = default;
shared_any(const shared_any& rhs) noexcept = default;
shared_any& operator =(const shared_any&) noexcept = default;
shared_any(shared_any&& rhs) noexcept = default;
shared_any& operator =(shared_any&& rhs) noexcept = default;
using base::base;
using base::operator =;
};
inline void swap(shared_any& lhs, shared_any& rhs) noexcept {
lhs.swap(rhs);
}
template<typename T, typename... Args>
inline shared_any make_shared_any(Args&& ...args) {
return shared_any(std::in_place_type<T>, std::forward<Args>(args)...);
}
template<typename T, typename U, typename... Args>
inline shared_any make_shared_any(std::initializer_list<U> i, Args&& ...args) {
return shared_any(std::in_place_type<T>, i, std::forward<Args>(args)...);
}
}
#endif // INCLUDE_STATIC_ANY
| 21,732 | 29.82695 | 86 | h |
null | ceph-main/src/include/bitmapper.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_BITMAPPER_H
#define CEPH_BITMAPPER_H
class bitmapper {
char *_data;
int _len;
public:
bitmapper() : _data(0), _len(0) { }
bitmapper(char *data, int len) : _data(data), _len(len) { }
void set_data(char *data, int len) { _data = data; _len = len; }
int bytes() const { return _len; }
int bits() const { return _len * 8; }
bool operator[](int b) const {
return get(b);
}
bool get(int b) const {
return _data[b >> 3] & (1 << (b&7));
}
void set(int b) {
_data[b >> 3] |= 1 << (b&7);
}
void clear(int b) {
_data[b >> 3] &= ~(1 << (b&7));
}
void toggle(int b) {
_data[b >> 3] ^= 1 << (b&7);
}
};
#endif
| 1,099 | 21.44898 | 71 | h |
null | ceph-main/src/include/blobhash.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_BLOBHASH_H
#define CEPH_BLOBHASH_H
#include <cstdint>
#include "hash.h"
class blobhash {
public:
uint32_t operator()(const void* p, size_t len) {
static rjhash<std::uint32_t> H;
std::uint32_t acc = 0;
auto buf = static_cast<const unsigned char*>(p);
while (len >= sizeof(acc)) {
acc ^= unaligned_load(buf);
buf += sizeof(std::uint32_t);
len -= sizeof(std::uint32_t);
}
// handle the last few bytes of p[-(len % 4):]
switch (len) {
case 3:
acc ^= buf[2] << 16;
[[fallthrough]];
case 2:
acc ^= buf[1] << 8;
[[fallthrough]];
case 1:
acc ^= buf[0];
}
return H(acc);
}
private:
static inline std::uint32_t unaligned_load(const unsigned char* p) {
std::uint32_t result;
__builtin_memcpy(&result, p, sizeof(result));
return result;
}
};
#endif
| 1,272 | 22.574074 | 71 | h |
null | ceph-main/src/include/btree_map.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_INCLUDE_BTREE_MAP_H
#define CEPH_INCLUDE_BTREE_MAP_H
#include "include/cpp-btree/btree.h"
#include "include/cpp-btree/btree_map.h"
#include "include/ceph_assert.h" // cpp-btree uses system assert, blech
#include "include/encoding.h"
template<class T, class U>
inline void encode(const btree::btree_map<T,U>& m, ceph::buffer::list& bl)
{
using ceph::encode;
__u32 n = (__u32)(m.size());
encode(n, bl);
for (typename btree::btree_map<T,U>::const_iterator p = m.begin(); p != m.end(); ++p) {
encode(p->first, bl);
encode(p->second, bl);
}
}
template<class T, class U>
inline void encode(const btree::btree_map<T,U>& m, ceph::buffer::list& bl, uint64_t features)
{
using ceph::encode;
__u32 n = (__u32)(m.size());
encode(n, bl);
for (typename btree::btree_map<T,U>::const_iterator p = m.begin(); p != m.end(); ++p) {
encode(p->first, bl, features);
encode(p->second, bl, features);
}
}
template<class T, class U>
inline void decode(btree::btree_map<T,U>& m, ceph::buffer::list::const_iterator& p)
{
using ceph::decode;
__u32 n;
decode(n, p);
m.clear();
while (n--) {
T k;
decode(k, p);
decode(m[k], p);
}
}
template<class T, class U>
inline void encode_nohead(const btree::btree_map<T,U>& m, ceph::buffer::list& bl)
{
using ceph::encode;
for (typename btree::btree_map<T,U>::const_iterator p = m.begin(); p != m.end(); ++p) {
encode(p->first, bl);
encode(p->second, bl);
}
}
template<class T, class U>
inline void decode_nohead(int n, btree::btree_map<T,U>& m, ceph::buffer::list::const_iterator& p)
{
using ceph::decode;
m.clear();
while (n--) {
T k;
decode(k, p);
decode(m[k], p);
}
}
#endif
| 1,801 | 25.115942 | 97 | h |
null | ceph-main/src/include/buffer_raw.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_BUFFER_RAW_H
#define CEPH_BUFFER_RAW_H
#include <map>
#include <utility>
#include <type_traits>
#include "common/ceph_atomic.h"
#include "include/buffer.h"
#include "include/mempool.h"
#include "include/spinlock.h"
namespace ceph::buffer {
inline namespace v15_2_0 {
class raw {
public:
// In the future we might want to have a slab allocator here with few
// embedded slots. This would allow to avoid the "if" in dtor of ptr_node.
std::aligned_storage<sizeof(ptr_node),
alignof(ptr_node)>::type bptr_storage;
protected:
char *data;
unsigned len;
public:
ceph::atomic<unsigned> nref { 0 };
int mempool;
std::pair<size_t, size_t> last_crc_offset {std::numeric_limits<size_t>::max(), std::numeric_limits<size_t>::max()};
std::pair<uint32_t, uint32_t> last_crc_val;
mutable ceph::spinlock crc_spinlock;
explicit raw(unsigned l, int mempool=mempool::mempool_buffer_anon)
: data(nullptr), len(l), nref(0), mempool(mempool) {
mempool::get_pool(mempool::pool_index_t(mempool)).adjust_count(1, len);
}
raw(char *c, unsigned l, int mempool=mempool::mempool_buffer_anon)
: data(c), len(l), nref(0), mempool(mempool) {
mempool::get_pool(mempool::pool_index_t(mempool)).adjust_count(1, len);
}
virtual ~raw() {
mempool::get_pool(mempool::pool_index_t(mempool)).adjust_count(
-1, -(int)len);
}
void _set_len(unsigned l) {
mempool::get_pool(mempool::pool_index_t(mempool)).adjust_count(
-1, -(int)len);
len = l;
mempool::get_pool(mempool::pool_index_t(mempool)).adjust_count(1, len);
}
void reassign_to_mempool(int pool) {
if (pool == mempool) {
return;
}
mempool::get_pool(mempool::pool_index_t(mempool)).adjust_count(
-1, -(int)len);
mempool = pool;
mempool::get_pool(mempool::pool_index_t(pool)).adjust_count(1, len);
}
void try_assign_to_mempool(int pool) {
if (mempool == mempool::mempool_buffer_anon) {
reassign_to_mempool(pool);
}
}
private:
// no copying.
// cppcheck-suppress noExplicitConstructor
raw(const raw &other) = delete;
const raw& operator=(const raw &other) = delete;
public:
char *get_data() const {
return data;
}
unsigned get_len() const {
return len;
}
bool get_crc(const std::pair<size_t, size_t> &fromto,
std::pair<uint32_t, uint32_t> *crc) const {
std::lock_guard lg(crc_spinlock);
if (last_crc_offset == fromto) {
*crc = last_crc_val;
return true;
}
return false;
}
void set_crc(const std::pair<size_t, size_t> &fromto,
const std::pair<uint32_t, uint32_t> &crc) {
std::lock_guard lg(crc_spinlock);
last_crc_offset = fromto;
last_crc_val = crc;
}
void invalidate_crc() {
std::lock_guard lg(crc_spinlock);
last_crc_offset.first = std::numeric_limits<size_t>::max();
last_crc_offset.second = std::numeric_limits<size_t>::max();
}
};
} // inline namespace v15_2_0
} // namespace ceph::buffer
#endif // CEPH_BUFFER_RAW_H
| 3,507 | 27.991736 | 119 | h |
null | ceph-main/src/include/ceph_assert.h | #ifndef CEPH_ASSERT_H
#define CEPH_ASSERT_H
#include <cstdlib>
#include <string>
#ifndef __STRING
# define __STRING(x) #x
#endif
#if defined(__linux__)
#include <features.h>
#elif defined(__FreeBSD__)
#include <sys/cdefs.h>
#define __GNUC_PREREQ(minor, major) __GNUC_PREREQ__(minor, major)
#elif defined(__sun) || defined(_AIX)
#include "include/compat.h"
#include <assert.h>
#endif
#ifdef __CEPH__
# include "acconfig.h"
#endif
#include "include/common_fwd.h"
namespace ceph {
struct BackTrace;
/*
* Select a function-name variable based on compiler tests, and any compiler
* specific overrides.
*/
#if defined(HAVE_PRETTY_FUNC)
# define __CEPH_ASSERT_FUNCTION __PRETTY_FUNCTION__
#elif defined(HAVE_FUNC)
# define __CEPH_ASSERT_FUNCTION __func__
#else
# define __CEPH_ASSERT_FUNCTION ((__const char *) 0)
#endif
extern void register_assert_context(CephContext *cct);
struct assert_data {
const char *assertion;
const char *file;
const int line;
const char *function;
};
extern void __ceph_assert_fail(const char *assertion, const char *file, int line, const char *function)
__attribute__ ((__noreturn__));
extern void __ceph_assert_fail(const assert_data &ctx)
__attribute__ ((__noreturn__));
extern void __ceph_assertf_fail(const char *assertion, const char *file, int line, const char *function, const char* msg, ...)
__attribute__ ((__noreturn__));
extern void __ceph_assert_warn(const char *assertion, const char *file, int line, const char *function);
[[noreturn]] void __ceph_abort(const char *file, int line, const char *func,
const std::string& msg);
[[noreturn]] void __ceph_abortf(const char *file, int line, const char *func,
const char* msg, ...);
#define _CEPH_ASSERT_VOID_CAST static_cast<void>
#define assert_warn(expr) \
((expr) \
? _CEPH_ASSERT_VOID_CAST (0) \
: ::ceph::__ceph_assert_warn (__STRING(expr), __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION))
}
using namespace ceph;
/*
* ceph_abort aborts the program with a nice backtrace.
*
* Currently, it's the same as assert(0), but we may one day make assert a
* debug-only thing, like it is in many projects.
*/
#define ceph_abort(msg, ...) \
::ceph::__ceph_abort( __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION, "abort() called")
#define ceph_abort_msg(msg) \
::ceph::__ceph_abort( __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION, msg)
#define ceph_abort_msgf(...) \
::ceph::__ceph_abortf( __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION, __VA_ARGS__)
#ifdef __SANITIZE_ADDRESS__
#define ceph_assert(expr) \
do { \
((expr)) \
? _CEPH_ASSERT_VOID_CAST (0) \
: ::ceph::__ceph_assert_fail(__STRING(expr), __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION); \
} while (false)
#else
#define ceph_assert(expr) \
do { static const ceph::assert_data assert_data_ctx = \
{__STRING(expr), __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION}; \
((expr) \
? _CEPH_ASSERT_VOID_CAST (0) \
: ::ceph::__ceph_assert_fail(assert_data_ctx)); } while(false)
#endif
// this variant will *never* get compiled out to NDEBUG in the future.
// (ceph_assert currently doesn't either, but in the future it might.)
#ifdef __SANITIZE_ADDRESS__
#define ceph_assert_always(expr) \
do { \
((expr)) \
? _CEPH_ASSERT_VOID_CAST (0) \
: ::ceph::__ceph_assert_fail(__STRING(expr), __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION); \
} while(false)
#else
#define ceph_assert_always(expr) \
do { static const ceph::assert_data assert_data_ctx = \
{__STRING(expr), __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION}; \
((expr) \
? _CEPH_ASSERT_VOID_CAST (0) \
: ::ceph::__ceph_assert_fail(assert_data_ctx)); } while(false)
#endif
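/*
 * Usage sketch (the apply() function and State type are hypothetical, not
 * part of this header): ceph_assert() is for invariants that could in
 * principle become debug-only one day, while ceph_assert_always() is for
 * checks that must never be compiled out.
 *
 *   void apply(State *s) {
 *     ceph_assert(s != nullptr);           // programmer invariant
 *     ceph_assert_always(s->epoch > 0);    // must survive any future NDEBUG build
 *   }
 */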
// Named by analogy with printf. Along with an expression, takes a format
// string and parameters which are printed if the assertion fails.
#define assertf(expr, ...) \
((expr) \
? _CEPH_ASSERT_VOID_CAST (0) \
: ::ceph::__ceph_assertf_fail (__STRING(expr), __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION, __VA_ARGS__))
#define ceph_assertf(expr, ...) \
((expr) \
? _CEPH_ASSERT_VOID_CAST (0) \
: ::ceph::__ceph_assertf_fail (__STRING(expr), __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION, __VA_ARGS__))
// this variant will *never* get compiled out under NDEBUG in the future.
// (ceph_assertf currently doesn't get compiled out either, but in the future it might.)
#define ceph_assertf_always(expr, ...) \
((expr) \
? _CEPH_ASSERT_VOID_CAST (0) \
: ::ceph::__ceph_assertf_fail (__STRING(expr), __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION, __VA_ARGS__))
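/*
 * Usage sketch for the formatted variants (do_work() and r are hypothetical
 * names): the format string and arguments are only rendered when the
 * assertion fails.
 *
 *   int r = do_work();
 *   ceph_assertf(r == 0, "do_work failed with r=%d", r);
 *   ceph_assertf_always(r == 0, "do_work failed with r=%d", r);
 */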
#endif
| 5,066 | 33.236486 | 126 | h |
null | ceph-main/src/include/ceph_features.h | #ifndef __CEPH_FEATURES
#define __CEPH_FEATURES
#include "sys/types.h"
/*
* Each time we reclaim bits for reuse we need to specify another
* bitmask that, if all bits are set, indicates we have the new
* incarnation of that feature. Base case is 1 (first use)
*/
#define CEPH_FEATURE_INCARNATION_1 (0ull)
#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // SERVER_JEWEL
#define CEPH_FEATURE_INCARNATION_3 ((1ull<<57)|(1ull<<28)) // SERVER_MIMIC
#define DEFINE_CEPH_FEATURE(bit, incarnation, name) \
const static uint64_t CEPH_FEATURE_##name = (1ULL<<bit); \
const static uint64_t CEPH_FEATUREMASK_##name = \
(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
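// Expansion sketch, using the SERVER_LUMINOUS definition further down in
// this file: DEFINE_CEPH_FEATURE(21, 2, SERVER_LUMINOUS) yields
//   CEPH_FEATURE_SERVER_LUMINOUS     = (1ULL<<21)
//   CEPH_FEATUREMASK_SERVER_LUMINOUS = (1ULL<<21) | (1ULL<<57)
// i.e. the mask carries the feature bit plus its incarnation marker.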
// this bit is ignored but still advertised by release *when*
#define DEFINE_CEPH_FEATURE_DEPRECATED(bit, incarnation, name, when) \
const static uint64_t DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \
const static uint64_t DEPRECATED_CEPH_FEATUREMASK_##name = \
(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
// this bit is ignored by release *unused* and not advertised by
// release *unadvertised*
#define DEFINE_CEPH_FEATURE_RETIRED(bit, inc, name, unused, unadvertised)
// test for a feature. this test is safer than a typical mask against
// the bit because it ensures that we have the bit AND the marker for the
// bit's incarnation. this must be used in any case where the features
// bits may include an old meaning of the bit.
#define HAVE_FEATURE(x, name) \
(((x) & (CEPH_FEATUREMASK_##name)) == (CEPH_FEATUREMASK_##name))
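// For example (illustrative only; `features` stands for a peer's uint64_t
// feature word): HAVE_FEATURE(features, SERVER_LUMINOUS) is true only when
// both bit 21 (the feature) and bit 57 (its incarnation marker) are set,
// whereas a bare `features & CEPH_FEATURE_SERVER_LUMINOUS` test could match
// an older, unrelated meaning of bit 21.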
/*
* Notes on deprecation:
*
* For feature bits used *only* on the server-side:
*
* - In the first phase we indicate that a feature is DEPRECATED as of
* a particular release. This is the first major release X (say,
* mimic) that does not depend on its peers advertising the feature.
* That is, it safely assumes its peers all have the feature. We
* indicate this with the DEPRECATED macro. For example,
*
*      DEFINE_CEPH_FEATURE_DEPRECATED(50, 1, MON_METADATA, MIMIC)
*
* because 13.2.z (mimic) did not care if its peers advertised this
* feature bit.
*
*  - In the second phase we stop advertising the bit and call it
* RETIRED. This can normally be done 2 major releases
* following the one in which we marked the feature DEPRECATED. In
* the above example, for 15.0.z (octopus) we can say:
*
*      DEFINE_CEPH_FEATURE_RETIRED(50, 1, MON_METADATA, MIMIC, OCTOPUS)
*
* - The bit can be reused in the next release that will never talk to
*    a pre-octopus daemon (13 mimic or 14 nautilus) that advertises the
* bit: in this case, the 16.y.z (P-release).
*
* This ensures that no two versions who have different meanings for
* the bit ever speak to each other.
*/
/*
* Notes on the kernel client:
*
* - "X" means that the feature bit has been advertised and supported
* since kernel X
*
* - "X req" means that the feature bit has been advertised and required
* since kernel X
*
* The remaining feature bits are not and have never been used by the
* kernel client.
*/
DEFINE_CEPH_FEATURE( 0, 1, UID)
DEFINE_CEPH_FEATURE( 1, 1, NOSRCADDR) // 2.6.35 req
DEFINE_CEPH_FEATURE_RETIRED( 2, 1, MONCLOCKCHECK, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE( 2, 3, SERVER_NAUTILUS)
DEFINE_CEPH_FEATURE( 3, 1, FLOCK) // 2.6.36
DEFINE_CEPH_FEATURE( 4, 1, SUBSCRIBE2) // 4.6 req
DEFINE_CEPH_FEATURE( 5, 1, MONNAMES)
DEFINE_CEPH_FEATURE( 6, 1, RECONNECT_SEQ) // 3.10 req
DEFINE_CEPH_FEATURE( 7, 1, DIRLAYOUTHASH) // 2.6.38
DEFINE_CEPH_FEATURE( 8, 1, OBJECTLOCATOR)
DEFINE_CEPH_FEATURE( 9, 1, PGID64) // 3.9 req
DEFINE_CEPH_FEATURE(10, 1, INCSUBOSDMAP)
DEFINE_CEPH_FEATURE(11, 1, PGPOOL3) // 3.9 req
DEFINE_CEPH_FEATURE(12, 1, OSDREPLYMUX)
DEFINE_CEPH_FEATURE(13, 1, OSDENC) // 3.9 req
DEFINE_CEPH_FEATURE_RETIRED(14, 1, OMAP, HAMMER, JEWEL)
DEFINE_CEPH_FEATURE(14, 2, SERVER_KRAKEN)
DEFINE_CEPH_FEATURE(15, 1, MONENC)
DEFINE_CEPH_FEATURE_RETIRED(16, 1, QUERY_T, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE(16, 3, SERVER_OCTOPUS)
DEFINE_CEPH_FEATURE(16, 3, OSD_REPOP_MLCOD)
DEFINE_CEPH_FEATURE_RETIRED(17, 1, INDEP_PG_MAP, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE(17, 3, OS_PERF_STAT_NS)
DEFINE_CEPH_FEATURE(18, 1, CRUSH_TUNABLES) // 3.6
DEFINE_CEPH_FEATURE_RETIRED(19, 1, CHUNKY_SCRUB, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE(19, 2, OSD_PGLOG_HARDLIMIT)
DEFINE_CEPH_FEATURE_RETIRED(20, 1, MON_NULLROUTE, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE(20, 3, SERVER_PACIFIC)
DEFINE_CEPH_FEATURE_RETIRED(21, 1, MON_GV, HAMMER, JEWEL)
DEFINE_CEPH_FEATURE(21, 2, SERVER_LUMINOUS) // 4.13
DEFINE_CEPH_FEATURE(21, 2, RESEND_ON_SPLIT) // overlap
DEFINE_CEPH_FEATURE(21, 2, RADOS_BACKOFF) // overlap
DEFINE_CEPH_FEATURE(21, 2, OSDMAP_PG_UPMAP) // overlap
DEFINE_CEPH_FEATURE(21, 2, CRUSH_CHOOSE_ARGS) // overlap
DEFINE_CEPH_FEATURE_RETIRED(22, 1, BACKFILL_RESERVATION, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE(22, 2, OSD_FIXED_COLLECTION_LIST)
DEFINE_CEPH_FEATURE(23, 1, MSG_AUTH) // 3.19 req (unless nocephx_require_signatures)
DEFINE_CEPH_FEATURE_RETIRED(24, 1, RECOVERY_RESERVATION, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE(24, 2, RECOVERY_RESERVATION_2)
DEFINE_CEPH_FEATURE(25, 1, CRUSH_TUNABLES2) // 3.9
DEFINE_CEPH_FEATURE(26, 1, CREATEPOOLID)
DEFINE_CEPH_FEATURE(27, 1, REPLY_CREATE_INODE) // 3.9
DEFINE_CEPH_FEATURE_RETIRED(28, 1, OSD_HBMSGS, HAMMER, JEWEL)
DEFINE_CEPH_FEATURE(28, 2, SERVER_MIMIC)
DEFINE_CEPH_FEATURE(29, 1, MDSENC) // 4.7
DEFINE_CEPH_FEATURE(30, 1, OSDHASHPSPOOL) // 3.9
DEFINE_CEPH_FEATURE_RETIRED(31, 1, MON_SINGLE_PAXOS, NAUTILUS, PACIFIC)
DEFINE_CEPH_FEATURE(31, 3, SERVER_REEF)
DEFINE_CEPH_FEATURE_RETIRED(32, 1, OSD_SNAPMAPPER, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE(32, 3, STRETCH_MODE)
DEFINE_CEPH_FEATURE_RETIRED(33, 1, MON_SCRUB, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE(33, 3, SERVER_QUINCY)
DEFINE_CEPH_FEATURE_RETIRED(34, 1, OSD_PACKED_RECOVERY, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE(34, 3, RANGE_BLOCKLIST)
DEFINE_CEPH_FEATURE(35, 1, OSD_CACHEPOOL) // 3.14
DEFINE_CEPH_FEATURE(36, 1, CRUSH_V2) // 3.14
DEFINE_CEPH_FEATURE(37, 1, EXPORT_PEER) // 3.14
DEFINE_CEPH_FEATURE_RETIRED(38, 1, OSD_ERASURE_CODES, MIMIC, OCTOPUS)
// available
DEFINE_CEPH_FEATURE(39, 1, OSDMAP_ENC) // 3.15
DEFINE_CEPH_FEATURE(40, 1, MDS_INLINE_DATA) // 3.19
DEFINE_CEPH_FEATURE(41, 1, CRUSH_TUNABLES3) // 3.15
DEFINE_CEPH_FEATURE(41, 1, OSD_PRIMARY_AFFINITY) // overlap
DEFINE_CEPH_FEATURE(42, 1, MSGR_KEEPALIVE2) // 4.3 (for consistency)
DEFINE_CEPH_FEATURE(43, 1, OSD_POOLRESEND) // 4.13
DEFINE_CEPH_FEATURE_RETIRED(44, 1, ERASURE_CODE_PLUGINS_V2, MIMIC, OCTOPUS)
// available
DEFINE_CEPH_FEATURE_RETIRED(45, 1, OSD_SET_ALLOC_HINT, JEWEL, LUMINOUS)
// available
DEFINE_CEPH_FEATURE(46, 1, OSD_FADVISE_FLAGS)
DEFINE_CEPH_FEATURE_RETIRED(46, 1, OSD_REPOP, JEWEL, LUMINOUS) // overlap
DEFINE_CEPH_FEATURE_RETIRED(46, 1, OSD_OBJECT_DIGEST, JEWEL, LUMINOUS) // overlap
DEFINE_CEPH_FEATURE_RETIRED(46, 1, OSD_TRANSACTION_MAY_LAYOUT, JEWEL, LUMINOUS) // overlap
DEFINE_CEPH_FEATURE(47, 1, MDS_QUOTA) // 4.17
DEFINE_CEPH_FEATURE(48, 1, CRUSH_V4) // 4.1
DEFINE_CEPH_FEATURE_RETIRED(49, 1, OSD_MIN_SIZE_RECOVERY, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE_RETIRED(49, 1, OSD_PROXY_FEATURES, JEWEL, LUMINOUS) // overlap
// available
DEFINE_CEPH_FEATURE_RETIRED(50, 1, MON_METADATA, MIMIC, OCTOPUS)
// available
DEFINE_CEPH_FEATURE_RETIRED(51, 1, OSD_BITWISE_HOBJ_SORT, MIMIC, OCTOPUS)
// available
DEFINE_CEPH_FEATURE_RETIRED(52, 1, OSD_PROXY_WRITE_FEATURES, MIMIC, OCTOPUS)
// available
DEFINE_CEPH_FEATURE_RETIRED(53, 1, ERASURE_CODE_PLUGINS_V3, MIMIC, OCTOPUS)
// available
DEFINE_CEPH_FEATURE_RETIRED(54, 1, OSD_HITSET_GMT, MIMIC, OCTOPUS)
// available
DEFINE_CEPH_FEATURE_RETIRED(55, 1, HAMMER_0_94_4, MIMIC, OCTOPUS)
// available
DEFINE_CEPH_FEATURE(56, 1, NEW_OSDOP_ENCODING) // 4.13 (for pg_pool_t >= v25)
DEFINE_CEPH_FEATURE(57, 1, MON_STATEFUL_SUB) // 4.13
DEFINE_CEPH_FEATURE_RETIRED(57, 1, MON_ROUTE_OSDMAP, MIMIC, OCTOPUS) // overlap
DEFINE_CEPH_FEATURE(57, 1, SERVER_JEWEL) // overlap
DEFINE_CEPH_FEATURE(58, 1, CRUSH_TUNABLES5) // 4.5
DEFINE_CEPH_FEATURE(58, 1, NEW_OSDOPREPLY_ENCODING) // overlap
DEFINE_CEPH_FEATURE(58, 1, FS_FILE_LAYOUT_V2) // overlap
DEFINE_CEPH_FEATURE(59, 1, FS_BTIME)
DEFINE_CEPH_FEATURE(59, 1, FS_CHANGE_ATTR) // overlap
DEFINE_CEPH_FEATURE(59, 1, MSG_ADDR2) // overlap
DEFINE_CEPH_FEATURE(60, 1, OSD_RECOVERY_DELETES) // *do not share this bit*
DEFINE_CEPH_FEATURE(61, 1, CEPHX_V2) // 4.19, *do not share this bit*
DEFINE_CEPH_FEATURE(62, 1, RESERVED) // do not use; used as a sentinel
DEFINE_CEPH_FEATURE_RETIRED(63, 1, RESERVED_BROKEN, LUMINOUS, QUINCY) // client-facing
// available
/*
* Features supported. Should be everything above.
*/
#define CEPH_FEATURES_ALL \
(CEPH_FEATURE_UID | \
CEPH_FEATURE_NOSRCADDR | \
CEPH_FEATURE_FLOCK | \
CEPH_FEATURE_SUBSCRIBE2 | \
CEPH_FEATURE_MONNAMES | \
CEPH_FEATURE_RECONNECT_SEQ | \
CEPH_FEATURE_DIRLAYOUTHASH | \
CEPH_FEATURE_OBJECTLOCATOR | \
CEPH_FEATURE_PGID64 | \
CEPH_FEATURE_INCSUBOSDMAP | \
CEPH_FEATURE_PGPOOL3 | \
CEPH_FEATURE_OSDREPLYMUX | \
CEPH_FEATURE_OSDENC | \
CEPH_FEATURE_MONENC | \
CEPH_FEATURE_CRUSH_TUNABLES | \
CEPH_FEATURE_MSG_AUTH | \
CEPH_FEATURE_CRUSH_TUNABLES2 | \
CEPH_FEATURE_CREATEPOOLID | \
CEPH_FEATURE_REPLY_CREATE_INODE | \
CEPH_FEATURE_MDSENC | \
CEPH_FEATURE_OSDHASHPSPOOL | \
CEPH_FEATURE_NEW_OSDOP_ENCODING | \
CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
CEPH_FEATURE_OSD_CACHEPOOL | \
CEPH_FEATURE_CRUSH_V2 | \
CEPH_FEATURE_EXPORT_PEER | \
CEPH_FEATURE_OSDMAP_ENC | \
CEPH_FEATURE_MDS_INLINE_DATA | \
CEPH_FEATURE_CRUSH_TUNABLES3 | \
CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
CEPH_FEATURE_MSGR_KEEPALIVE2 | \
CEPH_FEATURE_OSD_POOLRESEND | \
CEPH_FEATURE_OSD_FADVISE_FLAGS | \
CEPH_FEATURE_MDS_QUOTA | \
CEPH_FEATURE_CRUSH_V4 | \
CEPH_FEATURE_MON_STATEFUL_SUB | \
CEPH_FEATURE_CRUSH_TUNABLES5 | \
CEPH_FEATURE_SERVER_JEWEL | \
CEPH_FEATURE_FS_FILE_LAYOUT_V2 | \
CEPH_FEATURE_SERVER_KRAKEN | \
CEPH_FEATURE_FS_BTIME | \
CEPH_FEATURE_FS_CHANGE_ATTR | \
CEPH_FEATURE_MSG_ADDR2 | \
CEPH_FEATURE_SERVER_LUMINOUS | \
CEPH_FEATURE_RESEND_ON_SPLIT | \
CEPH_FEATURE_RADOS_BACKOFF | \
CEPH_FEATURE_OSD_RECOVERY_DELETES | \
CEPH_FEATURE_SERVER_MIMIC | \
CEPH_FEATURE_RECOVERY_RESERVATION_2 | \
CEPH_FEATURE_SERVER_NAUTILUS | \
CEPH_FEATURE_CEPHX_V2 | \
CEPH_FEATURE_OSD_PGLOG_HARDLIMIT | \
CEPH_FEATUREMASK_SERVER_OCTOPUS | \
CEPH_FEATUREMASK_STRETCH_MODE | \
CEPH_FEATUREMASK_OSD_REPOP_MLCOD | \
CEPH_FEATUREMASK_SERVER_PACIFIC | \
CEPH_FEATURE_OSD_FIXED_COLLECTION_LIST | \
CEPH_FEATUREMASK_SERVER_QUINCY | \
CEPH_FEATURE_RANGE_BLOCKLIST | \
CEPH_FEATUREMASK_SERVER_REEF | \
0ULL)
#define CEPH_FEATURES_SUPPORTED_DEFAULT CEPH_FEATURES_ALL
/*
* crush related features
*/
#define CEPH_FEATURES_CRUSH \
(CEPH_FEATURE_CRUSH_TUNABLES | \
CEPH_FEATURE_CRUSH_TUNABLES2 | \
CEPH_FEATURE_CRUSH_TUNABLES3 | \
CEPH_FEATURE_CRUSH_TUNABLES5 | \
CEPH_FEATURE_CRUSH_V2 | \
CEPH_FEATURE_CRUSH_V4 | \
CEPH_FEATUREMASK_CRUSH_CHOOSE_ARGS)
/*
* make sure we don't try to use the reserved features
*/
#define CEPH_STATIC_ASSERT(x) (void)(sizeof(int[((x)==0) ? -1 : 0]))
static inline void ____build_time_check_for_reserved_bits(void) {
CEPH_STATIC_ASSERT((CEPH_FEATURES_ALL & CEPH_FEATURE_RESERVED) == 0);
}
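/*
 * Sketch of how the check above works (illustrative values only): when the
 * asserted expression is true the array size evaluates to 0 and the sizeof()
 * compiles away; when it is false the size becomes -1 and compilation fails.
 *
 *   CEPH_STATIC_ASSERT(1 == 1);   // sizeof(int[0])  -> accepted
 *   CEPH_STATIC_ASSERT(1 == 2);   // sizeof(int[-1]) -> compile error
 */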
#endif
| 11,605 | 40.302491 | 92 | h |
null | ceph-main/src/include/ceph_frag.h | #ifndef FS_CEPH_FRAG_H
#define FS_CEPH_FRAG_H
/*
* "Frags" are a way to describe a subset of a 32-bit number space,
* using a mask and a value to match against that mask. Any given frag
* (subset of the number space) can be partitioned into 2^n sub-frags.
*
* Frags are encoded into a 32-bit word:
* 8 upper bits = "bits"
* 24 lower bits = "value"
* (We could go to 5+27 bits, but who cares.)
*
* We use the _most_ significant bits of the 24 bit value. This makes
* values logically sort.
*
* Unfortunately, because the "bits" field is still in the high bits, we
* can't sort encoded frags numerically. However, it does allow you
* to feed encoded frags as values into frag_contains_value.
*/
static inline __u32 ceph_frag_make(__u32 b, __u32 v)
{
return (b << 24) |
(v & (0xffffffu << (24-b)) & 0xffffffu);
}
static inline __u32 ceph_frag_bits(__u32 f)
{
return f >> 24;
}
static inline __u32 ceph_frag_value(__u32 f)
{
return f & 0xffffffu;
}
static inline __u32 ceph_frag_mask(__u32 f)
{
return (0xffffffu << (24-ceph_frag_bits(f))) & 0xffffffu;
}
static inline __u32 ceph_frag_mask_shift(__u32 f)
{
return 24 - ceph_frag_bits(f);
}
static inline int ceph_frag_contains_value(__u32 f, __u32 v)
{
return (v & ceph_frag_mask(f)) == ceph_frag_value(f);
}
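/*
 * Worked example (illustrative values only): the frag covering the top half
 * of the 24-bit space has bits=1 and its value's top bit set, so
 *
 *   __u32 f = ceph_frag_make(1, 0x800000);         // f == 0x01800000
 *   ceph_frag_bits(f)   == 1
 *   ceph_frag_value(f)  == 0x800000
 *   ceph_frag_mask(f)   == 0x800000
 *   ceph_frag_contains_value(f, 0x900000) != 0     // 0x900000 lies in the top half
 */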
static inline int ceph_frag_contains_frag(__u32 f, __u32 sub)
{
/* is sub as specific as us, and contained by us? */
return ceph_frag_bits(sub) >= ceph_frag_bits(f) &&
(ceph_frag_value(sub) & ceph_frag_mask(f)) == ceph_frag_value(f);
}
static inline __u32 ceph_frag_parent(__u32 f)
{
return ceph_frag_make(ceph_frag_bits(f) - 1,
ceph_frag_value(f) & (ceph_frag_mask(f) << 1));
}
static inline int ceph_frag_is_left_child(__u32 f)
{
return ceph_frag_bits(f) > 0 &&
(ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 0;
}
static inline int ceph_frag_is_right_child(__u32 f)
{
return ceph_frag_bits(f) > 0 &&
(ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) != 0;
}
static inline __u32 ceph_frag_sibling(__u32 f)
{
return ceph_frag_make(ceph_frag_bits(f),
ceph_frag_value(f) ^ (0x1000000 >> ceph_frag_bits(f)));
}
static inline __u32 ceph_frag_left_child(__u32 f)
{
return ceph_frag_make(ceph_frag_bits(f)+1, ceph_frag_value(f));
}
static inline __u32 ceph_frag_right_child(__u32 f)
{
return ceph_frag_make(ceph_frag_bits(f)+1,
ceph_frag_value(f) | (0x1000000 >> (1+ceph_frag_bits(f))));
}
static inline __u32 ceph_frag_make_child(__u32 f, int by, int i)
{
int newbits = ceph_frag_bits(f) + by;
return ceph_frag_make(newbits,
ceph_frag_value(f) | (i << (24 - newbits)));
}
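/*
 * Worked example of tree navigation (illustrative values only), starting
 * from the root frag (bits=0, value=0, i.e. f == 0):
 *
 *   ceph_frag_left_child(0)  == ceph_frag_make(1, 0)        == 0x01000000
 *   ceph_frag_right_child(0) == ceph_frag_make(1, 0x800000) == 0x01800000
 *   ceph_frag_parent(0x01800000) == 0                       // back at the root
 *   ceph_frag_make_child(0, 2, 3) == ceph_frag_make(2, 0xc00000)  // 2 levels down, index 3
 */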
static inline int ceph_frag_is_leftmost(__u32 f)
{
return ceph_frag_value(f) == 0;
}
static inline int ceph_frag_is_rightmost(__u32 f)
{
return ceph_frag_value(f) == ceph_frag_mask(f);
}
static inline __u32 ceph_frag_next(__u32 f)
{
return ceph_frag_make(ceph_frag_bits(f),
ceph_frag_value(f) + (0x1000000 >> ceph_frag_bits(f)));
}
/*
* comparator to sort frags logically, as when traversing the
* number space in ascending order...
*/
int ceph_frag_compare(__u32 a, __u32 b);
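/*
 * For example (illustrative values only): ceph_frag_make(1, 0x800000)
 * encodes as 0x01800000 while ceph_frag_make(2, 0x400000) encodes as
 * 0x02400000, so sorting the raw encoded words would put the first frag
 * before the second even though it starts later in the number space;
 * ceph_frag_compare() orders by position in the number space instead.
 */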
#endif
| 3,114 | 27.318182 | 73 | h |