repo (stringlengths 1-152, ⌀) | file (stringlengths 14-221) | code (stringlengths 501-25k) | file_length (int64 501-25k) | avg_line_length (float64 20-99.5) | max_line_length (int64 21-134) | extension_type (stringclasses, 2 values)
---|---|---|---|---|---|---|
null | ceph-main/src/crimson/common/tmap_helpers.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "include/expected.hpp"
#include "include/buffer.h"
#include "include/encoding.h"
namespace crimson::common {
/**
* do_tmap_up
*
* Performs tmap update instructions encoded in buffer referenced by in.
*
* @param [in] in iterator to buffer containing encoded tmap update operations
* @param [in] contents current contents of object
* @return buffer containing new object contents,
* -EINVAL for decoding errors,
* -EEXIST for CEPH_OSD_TMAP_CREATE on a key that exists
* -ENOENT for CEPH_OSD_TMAP_RM on a key that does not exist
*/
using do_tmap_up_ret = tl::expected<bufferlist, int>;
do_tmap_up_ret do_tmap_up(bufferlist::const_iterator in, bufferlist contents);
/**
* do_tmap_put
*
* Validates passed buffer pointed to by in and returns resulting object buffer.
*
* @param [in] in iterator to buffer containing tmap encoding
* @return buffer containing validated tmap encoded by in
* -EINVAL for decoding errors,
*/
using do_tmap_put_ret = tl::expected<bufferlist, int>;
do_tmap_put_ret do_tmap_put(bufferlist::const_iterator in);
}
| 1,186 | 27.95122 | 80 | h |
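Because both helpers return tl::expected rather than throwing, a caller branches on the contained value or the negative errno. A minimal usage sketch follows; apply_tmap_update(), update_bl and current_contents are hypothetical and not part of the header above:

// Sketch only: consuming the tl::expected-based result of do_tmap_up().
// apply_tmap_update(), update_bl and current_contents are hypothetical.
#include "include/buffer.h"
#include "crimson/common/tmap_helpers.h"

int apply_tmap_update(const ceph::bufferlist &update_bl,
                      ceph::bufferlist current_contents,
                      ceph::bufferlist *out)
{
  auto result = crimson::common::do_tmap_up(
      update_bl.cbegin(), std::move(current_contents));
  if (!result.has_value()) {
    // negative errno: -EINVAL, -EEXIST or -ENOENT, as documented above
    return result.error();
  }
  *out = std::move(*result);
  return 0;
}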
null | ceph-main/src/crimson/common/tri_mutex.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/future.hh>
#include <seastar/core/circular_buffer.hh>
class read_lock {
public:
seastar::future<> lock();
void unlock();
};
class write_lock {
public:
seastar::future<> lock();
void unlock();
};
class excl_lock {
public:
seastar::future<> lock();
void unlock();
};
// promote from read to excl
class excl_lock_from_read {
public:
seastar::future<> lock();
void unlock();
};
// promote from write to excl
class excl_lock_from_write {
public:
seastar::future<> lock();
void unlock();
};
// promote from excl to excl
class excl_lock_from_excl {
public:
seastar::future<> lock();
void unlock();
};
/// shared/exclusive mutual exclusion
///
/// in this lock design, the use of reader and writer is entirely
/// independent of the conventional reader/writer lock usage. Here, what we
/// mean is that we can pipeline reads, and we can pipeline writes, but we
/// cannot allow a read while writes are in progress or a write while reads are
/// in progress. Any rmw operation is therefore exclusive.
///
/// tri_mutex is based on seastar::shared_mutex, but instead of two kinds of
/// waiters, tri_mutex keeps track of three kinds of lock users:
/// - readers
/// - writers
/// - exclusive users
class tri_mutex : private read_lock,
write_lock,
excl_lock,
excl_lock_from_read,
excl_lock_from_write,
excl_lock_from_excl
{
public:
tri_mutex() = default;
~tri_mutex();
read_lock& for_read() {
return *this;
}
write_lock& for_write() {
return *this;
}
excl_lock& for_excl() {
return *this;
}
excl_lock_from_read& excl_from_read() {
return *this;
}
excl_lock_from_write& excl_from_write() {
return *this;
}
excl_lock_from_excl& excl_from_excl() {
return *this;
}
// for shared readers
seastar::future<> lock_for_read();
bool try_lock_for_read() noexcept;
void unlock_for_read();
void promote_from_read();
void demote_to_read();
unsigned get_readers() const {
return readers;
}
// for shared writers
seastar::future<> lock_for_write(bool greedy);
bool try_lock_for_write(bool greedy) noexcept;
void unlock_for_write();
void promote_from_write();
void demote_to_write();
unsigned get_writers() const {
return writers;
}
// for exclusive users
seastar::future<> lock_for_excl();
bool try_lock_for_excl() noexcept;
void unlock_for_excl();
bool is_excl_acquired() const {
return exclusively_used;
}
bool is_acquired() const;
  /// pass the provided exception to any pending waiters
template<typename Exception>
void abort(Exception ex) {
while (!waiters.empty()) {
auto& waiter = waiters.front();
waiter.pr.set_exception(std::make_exception_ptr(ex));
waiters.pop_front();
}
}
private:
void wake();
unsigned readers = 0;
unsigned writers = 0;
bool exclusively_used = false;
enum class type_t : uint8_t {
read,
write,
exclusive,
none,
};
struct waiter_t {
waiter_t(seastar::promise<>&& pr, type_t type)
: pr(std::move(pr)), type(type)
{}
seastar::promise<> pr;
type_t type;
};
seastar::circular_buffer<waiter_t> waiters;
friend class read_lock;
friend class write_lock;
friend class excl_lock;
friend class excl_lock_from_read;
friend class excl_lock_from_write;
friend class excl_lock_from_excl;
};
| 3,601 | 21.942675 | 79 | h |
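In practice a caller takes one side of the lock, performs its asynchronous work, and releases it in a continuation; reads pipeline with reads, writes with writes, and a read-modify-write path takes the exclusive side. A hedged sketch, where do_read() and do_rmw() stand in for the caller's own asynchronous operations:

// Sketch: pipelined readers vs. an exclusive read-modify-write user.
// do_read() and do_rmw() are hypothetical asynchronous operations.
#include <seastar/core/future.hh>
#include "crimson/common/tri_mutex.h"

seastar::future<> do_read();   // hypothetical
seastar::future<> do_rmw();    // hypothetical

seastar::future<> read_object(tri_mutex &lock) {
  return lock.lock_for_read().then([&lock] {
    return do_read().finally([&lock] {
      lock.unlock_for_read();
    });
  });
}

seastar::future<> rmw_object(tri_mutex &lock) {
  return lock.lock_for_excl().then([&lock] {
    return do_rmw().finally([&lock] {
      lock.unlock_for_excl();
    });
  });
}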
null | ceph-main/src/crimson/common/utility.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <type_traits>
namespace _impl {
template <class T> struct always_false : std::false_type {};
};
template <class T>
void assert_moveable(T& t) {
// It's fine
}
template <class T>
void assert_moveable(const T& t) {
static_assert(_impl::always_false<T>::value, "unable to move-out from T");
}
namespace internal {
template <typename Obj, typename Method, typename ArgTuple, size_t... I>
static auto _apply_method_to_tuple(
Obj &obj, Method method, ArgTuple &&tuple,
std::index_sequence<I...>) {
return (obj.*method)(std::get<I>(std::forward<ArgTuple>(tuple))...);
}
}
template <typename Obj, typename Method, typename ArgTuple>
auto apply_method_to_tuple(Obj &obj, Method method, ArgTuple &&tuple) {
constexpr auto tuple_size = std::tuple_size_v<ArgTuple>;
return internal::_apply_method_to_tuple(
obj, method, std::forward<ArgTuple>(tuple),
std::make_index_sequence<tuple_size>());
}
| 1,050 | 25.948718 | 78 | h |
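apply_method_to_tuple is the member-function counterpart of std::apply: it expands a tuple of arguments into a single method invocation. A small self-contained sketch (Logger and example() are illustrative only):

// Sketch: expanding a tuple of arguments into a member-function call.
#include <string>
#include <tuple>

struct Logger {
  int log(const std::string &msg, int level) {
    return level;  // placeholder body
  }
};

int example() {
  Logger logger;
  auto args = std::make_tuple(std::string{"hello"}, 3);
  // equivalent to logger.log("hello", 3)
  return apply_method_to_tuple(logger, &Logger::log, std::move(args));
}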
null | ceph-main/src/crimson/crush/CrushLocation.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iosfwd>
#include <map>
#include <string>
#if FMT_VERSION >= 90000
#include <fmt/ostream.h>
#endif
#include <seastar/core/seastar.hh>
namespace crimson::crush {
class CrushLocation {
public:
explicit CrushLocation() {
}
seastar::future<> update_from_conf(); ///< refresh from config
seastar::future<> init_on_startup();
seastar::future<> update_from_hook(); ///< call hook, if present
std::multimap<std::string, std::string> get_location() const;
private:
void _parse(const std::string& s);
std::multimap<std::string, std::string> loc;
};
std::ostream& operator<<(std::ostream& os, const CrushLocation& loc);
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::crush::CrushLocation> : fmt::ostream_formatter {};
#endif
| 884 | 22.289474 | 93 | h |
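The typical flow is to refresh the location (from config or hook) and then read the resulting multimap of bucket type to bucket name. A hedged sketch; log_location() is illustrative:

// Sketch: refreshing and reading the CRUSH location.
#include "crimson/crush/CrushLocation.h"

seastar::future<> log_location(crimson::crush::CrushLocation &location) {
  return location.update_from_conf().then([&location] {
    for (const auto &[bucket_type, bucket_name] : location.get_location()) {
      // e.g. bucket_type == "host", bucket_name == "node1"
    }
  });
}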
null | ceph-main/src/crimson/mgr/client.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/timer.hh>
#include "crimson/common/gated.h"
#include "crimson/net/Dispatcher.h"
#include "crimson/net/Fwd.h"
#include "mon/MgrMap.h"
template<typename Message> using Ref = boost::intrusive_ptr<Message>;
namespace crimson::net {
class Messenger;
}
class MMgrMap;
class MMgrConfigure;
namespace crimson::mgr
{
// implement WithStats if you want to report stats to mgr periodically
class WithStats {
public:
virtual seastar::future<MessageURef> get_stats() const = 0;
virtual ~WithStats() {}
};
class Client : public crimson::net::Dispatcher {
public:
Client(crimson::net::Messenger& msgr,
WithStats& with_stats);
seastar::future<> start();
seastar::future<> stop();
void report();
private:
std::optional<seastar::future<>> ms_dispatch(
crimson::net::ConnectionRef conn, Ref<Message> m) override;
void ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace) final;
void ms_handle_connect(crimson::net::ConnectionRef conn, seastar::shard_id) final;
seastar::future<> handle_mgr_map(crimson::net::ConnectionRef conn,
Ref<MMgrMap> m);
seastar::future<> handle_mgr_conf(crimson::net::ConnectionRef conn,
Ref<MMgrConfigure> m);
seastar::future<> reconnect();
void print(std::ostream&) const;
friend std::ostream& operator<<(std::ostream& out, const Client& client);
private:
MgrMap mgrmap;
crimson::net::Messenger& msgr;
WithStats& with_stats;
crimson::net::ConnectionRef conn;
seastar::timer<seastar::lowres_clock> report_timer;
crimson::common::Gated gate;
};
inline std::ostream& operator<<(std::ostream& out, const Client& client) {
client.print(out);
return out;
}
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::mgr::Client> : fmt::ostream_formatter {};
#endif
| 1,908 | 25.887324 | 84 | h |
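A service plugs into the periodic reporting loop by implementing WithStats and passing itself as the second constructor argument of crimson::mgr::Client; get_stats() builds the message that report() will send. A hedged sketch, assuming MPGStats (the pg-stats report an OSD would send) as the payload type; a different service would build its own message:

// Sketch: a WithStats implementation that reports (empty) PG stats.
// The concrete message type and its fields depend on the service.
#include "crimson/mgr/client.h"
#include "messages/MPGStats.h"

class StatsReporter final : public crimson::mgr::WithStats {
public:
  seastar::future<MessageURef> get_stats() const final {
    auto report = crimson::make_message<MPGStats>();
    // populate report->osd_stat / report->pg_stat here
    return seastar::make_ready_future<MessageURef>(std::move(report));
  }
};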
null | ceph-main/src/crimson/mon/MonClient.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <memory>
#include <vector>
#include <seastar/core/future.hh>
#include <seastar/core/gate.hh>
#include <seastar/core/lowres_clock.hh>
#include <seastar/core/shared_ptr.hh>
#include <seastar/core/timer.hh>
#include "auth/AuthRegistry.h"
#include "auth/KeyRing.h"
#include "common/ceph_context.h"
#include "crimson/auth/AuthClient.h"
#include "crimson/auth/AuthServer.h"
#include "crimson/common/auth_handler.h"
#include "crimson/common/gated.h"
#include "crimson/net/Dispatcher.h"
#include "crimson/net/Fwd.h"
#include "mon/MonMap.h"
#include "mon/MonSub.h"
template<typename Message> using Ref = boost::intrusive_ptr<Message>;
namespace crimson::net {
class Messenger;
}
class LogClient;
struct AuthAuthorizeHandler;
class MAuthReply;
struct MMonMap;
struct MMonSubscribeAck;
struct MMonGetVersionReply;
struct MMonCommand;
struct MMonCommandAck;
struct MLogAck;
struct MConfig;
enum class log_flushing_t;
namespace crimson::mon {
class Connection;
class Client : public crimson::net::Dispatcher,
public crimson::auth::AuthClient,
public crimson::auth::AuthServer
{
EntityName entity_name;
KeyRing keyring;
const uint32_t want_keys;
MonMap monmap;
bool ready_to_send = false;
seastar::shared_ptr<Connection> active_con;
std::vector<seastar::shared_ptr<Connection>> pending_conns;
seastar::timer<seastar::lowres_clock> timer;
crimson::net::Messenger& msgr;
LogClient *log_client;
bool more_log_pending = false;
utime_t last_send_log;
seastar::future<> send_log(log_flushing_t flush_flag);
seastar::future<> wait_for_send_log();
// commands
using get_version_t = seastar::future<std::tuple<version_t, version_t>>;
ceph_tid_t last_version_req_id = 0;
std::map<ceph_tid_t, typename get_version_t::promise_type> version_reqs;
ceph_tid_t last_mon_command_id = 0;
using command_result_t =
seastar::future<std::tuple<std::int32_t, std::string, ceph::bufferlist>>;
struct mon_command_t {
MURef<MMonCommand> req;
typename command_result_t::promise_type result;
mon_command_t(MURef<MMonCommand> req);
};
std::vector<mon_command_t> mon_commands;
MonSub sub;
public:
Client(crimson::net::Messenger&, crimson::common::AuthHandler&);
Client(Client&&);
~Client();
seastar::future<> start();
seastar::future<> stop();
void set_log_client(LogClient *clog) {
log_client = clog;
}
const uuid_d& get_fsid() const {
return monmap.fsid;
}
get_version_t get_version(const std::string& map);
command_result_t run_command(std::string&& cmd,
bufferlist&& bl);
seastar::future<> send_message(MessageURef);
bool sub_want(const std::string& what, version_t start, unsigned flags);
void sub_got(const std::string& what, version_t have);
void sub_unwant(const std::string& what);
bool sub_want_increment(const std::string& what, version_t start, unsigned flags);
seastar::future<> renew_subs();
seastar::future<> wait_for_config();
void print(std::ostream&) const;
private:
// AuthServer methods
std::pair<std::vector<uint32_t>, std::vector<uint32_t>>
get_supported_auth_methods(int peer_type) final;
uint32_t pick_con_mode(int peer_type,
uint32_t auth_method,
const std::vector<uint32_t>& preferred_modes) final;
AuthAuthorizeHandler* get_auth_authorize_handler(int peer_type,
int auth_method) final;
int handle_auth_request(crimson::net::Connection &conn,
AuthConnectionMeta &auth_meta,
bool more,
uint32_t auth_method,
const ceph::bufferlist& payload,
uint64_t *p_peer_global_id,
ceph::bufferlist *reply) final;
crimson::common::CephContext cct; // for auth_registry
AuthRegistry auth_registry;
crimson::common::AuthHandler& auth_handler;
// AuthClient methods
crimson::auth::AuthClient::auth_request_t
get_auth_request(crimson::net::Connection &conn,
AuthConnectionMeta &auth_meta) final;
// Handle server's request to continue the handshake
ceph::bufferlist handle_auth_reply_more(crimson::net::Connection &conn,
AuthConnectionMeta &auth_meta,
const bufferlist& bl) final;
// Handle server's indication that authentication succeeded
int handle_auth_done(crimson::net::Connection &conn,
AuthConnectionMeta &auth_meta,
uint64_t global_id,
uint32_t con_mode,
const bufferlist& bl) final;
// Handle server's indication that the previous auth attempt failed
int handle_auth_bad_method(crimson::net::Connection &conn,
AuthConnectionMeta &auth_meta,
uint32_t old_auth_method,
int result,
const std::vector<uint32_t>& allowed_methods,
const std::vector<uint32_t>& allowed_modes) final;
private:
void tick();
std::optional<seastar::future<>> ms_dispatch(crimson::net::ConnectionRef conn,
MessageRef m) override;
void ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace) override;
seastar::future<> handle_monmap(crimson::net::Connection &conn,
Ref<MMonMap> m);
seastar::future<> handle_auth_reply(crimson::net::Connection &conn,
Ref<MAuthReply> m);
seastar::future<> handle_subscribe_ack(Ref<MMonSubscribeAck> m);
seastar::future<> handle_get_version_reply(Ref<MMonGetVersionReply> m);
seastar::future<> handle_mon_command_ack(Ref<MMonCommandAck> m);
seastar::future<> handle_log_ack(Ref<MLogAck> m);
seastar::future<> handle_config(Ref<MConfig> m);
seastar::future<> on_session_opened();
private:
seastar::future<> load_keyring();
seastar::future<> authenticate();
bool is_hunting() const;
// @param rank, rank of the monitor to be connected, if it is less than 0,
// try to connect to all monitors in monmap, until one of them
// is connected.
// @return true if a connection to monitor is established
seastar::future<bool> reopen_session(int rank);
std::vector<unsigned> get_random_mons(unsigned n) const;
seastar::future<> _add_conn(unsigned rank, uint64_t global_id);
void _finish_auth(const entity_addr_t& peer);
crimson::common::Gated gate;
// messages that are waiting for the active_con to be available
struct pending_msg_t {
pending_msg_t(MessageURef m) : msg(std::move(m)) {}
MessageURef msg;
seastar::promise<> pr;
};
std::deque<pending_msg_t> pending_messages;
std::optional<seastar::promise<>> config_updated;
};
inline std::ostream& operator<<(std::ostream& out, const Client& client) {
client.print(out);
return out;
}
} // namespace crimson::mon
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::mon::Client> : fmt::ostream_formatter {};
#endif
| 6,818 | 30.136986 | 84 | h |
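run_command() hides the MMonCommand round trip behind a future that resolves to the familiar (return code, status string, output bufferlist) triple. A hedged sketch of issuing a JSON-formatted command against a started client; dump_osd_map() is illustrative:

// Sketch: issuing a mon command and unpacking the reply tuple.
// `mon_client` is assumed to be a started crimson::mon::Client.
#include <cstdint>
#include "crimson/mon/MonClient.h"

seastar::future<std::int32_t> dump_osd_map(crimson::mon::Client &mon_client) {
  return mon_client.run_command(
      R"({"prefix": "osd dump", "format": "json"})", {}
  ).then([](auto result) {
    const auto &[rc, status, out_bl] = result;
    // rc is a negative errno on failure, status is the human-readable
    // explanation, out_bl carries the JSON payload on success.
    return rc;
  });
}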
null | ceph-main/src/crimson/net/Connection.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <queue>
#include <seastar/core/future.hh>
#include <seastar/core/shared_ptr.hh>
#include "Fwd.h"
namespace crimson::net {
using seq_num_t = uint64_t;
/**
* Connection
*
* Abstraction for messenger connections.
*
* Except when otherwise specified, methods must be invoked from the core on which
* the connection originates.
*/
class Connection : public seastar::enable_shared_from_this<Connection> {
public:
using clock_t = seastar::lowres_system_clock;
Connection() {}
virtual ~Connection() {}
/**
* get_shard_id
*
* The shard id where the Connection is dispatching events and handling I/O.
*
* May be changed with the accept/connect events.
*/
virtual const seastar::shard_id get_shard_id() const = 0;
virtual const entity_name_t &get_peer_name() const = 0;
entity_type_t get_peer_type() const { return get_peer_name().type(); }
int64_t get_peer_id() const { return get_peer_name().num(); }
bool peer_is_mon() const { return get_peer_name().is_mon(); }
bool peer_is_mgr() const { return get_peer_name().is_mgr(); }
bool peer_is_mds() const { return get_peer_name().is_mds(); }
bool peer_is_osd() const { return get_peer_name().is_osd(); }
bool peer_is_client() const { return get_peer_name().is_client(); }
virtual const entity_addr_t &get_peer_addr() const = 0;
const entity_addrvec_t get_peer_addrs() const {
return entity_addrvec_t(get_peer_addr());
}
virtual const entity_addr_t &get_peer_socket_addr() const = 0;
virtual uint64_t get_features() const = 0;
bool has_feature(uint64_t f) const {
return get_features() & f;
}
/// true if the handshake has completed and no errors have been encountered
virtual bool is_connected() const = 0;
/**
* send
*
* Send a message over a connection that has completed its handshake.
*
   * May be invoked from any core, but doing so requires chaining the returned
   * futures to preserve ordering.
*/
virtual seastar::future<> send(MessageURef msg) = 0;
/**
* send_keepalive
*
* Send a keepalive message over a connection that has completed its
* handshake.
*
   * May be invoked from any core, but doing so requires chaining the returned
   * futures to preserve ordering.
*/
virtual seastar::future<> send_keepalive() = 0;
virtual clock_t::time_point get_last_keepalive() const = 0;
virtual clock_t::time_point get_last_keepalive_ack() const = 0;
// workaround for the monitor client
virtual void set_last_keepalive_ack(clock_t::time_point when) = 0;
  // close the connection and cancel any pending futures from read/send,
// without dispatching any reset event
virtual void mark_down() = 0;
struct user_private_t {
virtual ~user_private_t() = default;
};
virtual bool has_user_private() const = 0;
virtual user_private_t &get_user_private() = 0;
virtual void set_user_private(std::unique_ptr<user_private_t>) = 0;
virtual void print(std::ostream& out) const = 0;
#ifdef UNIT_TESTS_BUILT
virtual bool is_closed() const = 0;
virtual bool is_closed_clean() const = 0;
virtual bool peer_wins() const = 0;
#endif
};
inline std::ostream& operator<<(std::ostream& out, const Connection& conn) {
out << "[";
conn.print(out);
out << "]";
return out;
}
} // namespace crimson::net
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::net::Connection> : fmt::ostream_formatter {};
#endif
| 3,853 | 25.763889 | 88 | h |
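The note that send() may be invoked from any core but requires chaining the returned futures is easy to get wrong: firing two sends independently from a foreign core can reorder them, so the second send should be chained onto the first. A hedged sketch; send_two_in_order() and the two messages are hypothetical:

// Sketch: preserving message order by chaining send() futures.
// `conn` is any crimson::net::ConnectionRef; m1/m2 are hypothetical messages.
#include "crimson/net/Connection.h"

seastar::future<> send_two_in_order(crimson::net::ConnectionRef conn,
                                    MessageURef m1, MessageURef m2) {
  return conn->send(std::move(m1)).then(
      [conn, m2=std::move(m2)]() mutable {
    return conn->send(std::move(m2));
  });
}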
null | ceph-main/src/crimson/net/Dispatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "Fwd.h"
class AuthAuthorizer;
namespace crimson::net {
class Dispatcher {
public:
virtual ~Dispatcher() {}
// Dispatchers are put into a chain as described by chain-of-responsibility
// pattern. If any of the dispatchers claims this message, it returns a valid
// future to prevent other dispatchers from processing it, and this is also
// used to throttle the connection if it's too busy.
virtual std::optional<seastar::future<>> ms_dispatch(ConnectionRef, MessageRef) = 0;
  // The connection is accepted or recovered (lossless), and all the follow-up
  // events and messages will be dispatched to the new_shard.
//
// is_replace=true means the accepted connection has replaced
// another connecting connection with the same peer_addr, which currently only
// happens under lossy policy when both sides wish to connect to each other.
virtual void ms_handle_accept(ConnectionRef conn, seastar::shard_id new_shard, bool is_replace) {}
  // The connection is (re)connected, and all the follow-up events and messages
  // will be dispatched to the new_shard.
virtual void ms_handle_connect(ConnectionRef conn, seastar::shard_id new_shard) {}
// a reset event is dispatched when the connection is closed unexpectedly.
//
// is_replace=true means the reset connection is going to be replaced by
// another accepting connection with the same peer_addr, which currently only
// happens under lossy policy when both sides wish to connect to each other.
virtual void ms_handle_reset(ConnectionRef conn, bool is_replace) {}
virtual void ms_handle_remote_reset(ConnectionRef conn) {}
};
} // namespace crimson::net
| 2,071 | 36 | 100 | h |
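A dispatcher claims a message by returning an engaged optional and declines by returning std::nullopt, which passes the message along the chain. A hedged sketch of a minimal dispatcher that only claims ping messages; PingDispatcher is illustrative:

// Sketch: a chain-of-responsibility dispatcher that claims only CEPH_MSG_PING.
#include "crimson/net/Dispatcher.h"
#include "msg/Message.h"

class PingDispatcher final : public crimson::net::Dispatcher {
public:
  std::optional<seastar::future<>> ms_dispatch(
      crimson::net::ConnectionRef conn, MessageRef m) final {
    if (m->get_type() != CEPH_MSG_PING) {
      return std::nullopt;  // let the next dispatcher in the chain handle it
    }
    // Claimed: returning a future also throttles the connection while the
    // future stays unresolved.
    return seastar::make_ready_future<>();
  }
};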
null | ceph-main/src/crimson/net/FrameAssemblerV2.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "msg/async/frames_v2.h"
#include "msg/async/crypto_onwire.h"
#include "msg/async/compression_onwire.h"
#include "crimson/common/gated.h"
#include "crimson/net/Socket.h"
namespace crimson::net {
class SocketConnection;
class FrameAssemblerV2;
using FrameAssemblerV2Ref = std::unique_ptr<FrameAssemblerV2>;
class FrameAssemblerV2 {
public:
FrameAssemblerV2(SocketConnection &conn);
~FrameAssemblerV2();
FrameAssemblerV2(const FrameAssemblerV2 &) = delete;
FrameAssemblerV2(FrameAssemblerV2 &&) = delete;
void set_shard_id(seastar::shard_id _sid) {
assert(seastar::this_shard_id() == sid);
clear();
sid = _sid;
}
seastar::shard_id get_shard_id() const {
return sid;
}
void set_is_rev1(bool is_rev1);
void create_session_stream_handlers(
const AuthConnectionMeta &auth_meta,
bool crossed);
void reset_handlers();
/*
* replacing
*/
struct mover_t {
SocketFRef socket;
ceph::crypto::onwire::rxtx_t session_stream_handlers;
ceph::compression::onwire::rxtx_t session_comp_handlers;
};
mover_t to_replace();
seastar::future<> replace_by(mover_t &&);
/*
* auth signature interfaces
*/
void start_recording();
struct record_bufs_t {
ceph::bufferlist rxbuf;
ceph::bufferlist txbuf;
};
record_bufs_t stop_recording();
/*
   * socket maintenance interfaces
*/
// the socket exists and not shutdown
bool is_socket_valid() const;
seastar::shard_id get_socket_shard_id() const;
void set_socket(SocketFRef &&);
void learn_socket_ephemeral_port_as_connector(uint16_t port);
// if may_cross_core == true, gate is required for cross-core shutdown
template <bool may_cross_core>
void shutdown_socket(crimson::common::Gated *gate);
seastar::future<> replace_shutdown_socket(SocketFRef &&);
seastar::future<> close_shutdown_socket();
/*
* socket read and write interfaces
*/
template <bool may_cross_core = true>
seastar::future<ceph::bufferptr> read_exactly(std::size_t bytes);
template <bool may_cross_core = true>
seastar::future<ceph::bufferlist> read(std::size_t bytes);
template <bool may_cross_core = true>
seastar::future<> write(ceph::bufferlist);
template <bool may_cross_core = true>
seastar::future<> flush();
template <bool may_cross_core = true>
seastar::future<> write_flush(ceph::bufferlist);
/*
* frame read and write interfaces
*/
/// may throw negotiation_failure as fault
struct read_main_t {
ceph::msgr::v2::Tag tag;
const ceph::msgr::v2::FrameAssembler *rx_frame_asm;
};
template <bool may_cross_core = true>
seastar::future<read_main_t> read_main_preamble();
/// may throw negotiation_failure as fault
using read_payload_t = ceph::msgr::v2::segment_bls_t;
// FIXME: read_payload_t cannot be no-throw move constructible
template <bool may_cross_core = true>
seastar::future<read_payload_t*> read_frame_payload();
template <class F>
ceph::bufferlist get_buffer(F &tx_frame) {
assert(seastar::this_shard_id() == sid);
#ifdef UNIT_TESTS_BUILT
intercept_frame(F::tag, true);
#endif
auto bl = tx_frame.get_buffer(tx_frame_asm);
log_main_preamble(bl);
return bl;
}
template <class F, bool may_cross_core = true>
seastar::future<> write_flush_frame(F &tx_frame) {
assert(seastar::this_shard_id() == sid);
auto bl = get_buffer(tx_frame);
return write_flush<may_cross_core>(std::move(bl));
}
static FrameAssemblerV2Ref create(SocketConnection &conn);
private:
bool has_socket() const;
SocketFRef move_socket();
void clear();
void log_main_preamble(const ceph::bufferlist &bl);
#ifdef UNIT_TESTS_BUILT
void intercept_frame(ceph::msgr::v2::Tag, bool is_write);
#endif
SocketConnection &conn;
SocketFRef socket;
// checking Socket::is_shutdown() synchronously is impossible when sid is
// different from the socket sid.
bool is_socket_shutdown = false;
// the current working shard, can be messenger or socket shard.
// if is messenger shard, should call interfaces with may_cross_core = true.
seastar::shard_id sid;
/*
* auth signature
*
* only in the messenger core
*/
bool record_io = false;
ceph::bufferlist rxbuf;
ceph::bufferlist txbuf;
/*
* frame data and handlers
*/
ceph::crypto::onwire::rxtx_t session_stream_handlers = { nullptr, nullptr };
// TODO
ceph::compression::onwire::rxtx_t session_comp_handlers = { nullptr, nullptr };
bool is_rev1 = false;
ceph::msgr::v2::FrameAssembler tx_frame_asm{
&session_stream_handlers, is_rev1, common::local_conf()->ms_crc_data,
&session_comp_handlers};
ceph::msgr::v2::FrameAssembler rx_frame_asm{
&session_stream_handlers, is_rev1, common::local_conf()->ms_crc_data,
&session_comp_handlers};
// in the messenger core during handshake,
// and in the socket core during open,
// must be cleaned before switching cores.
ceph::bufferlist rx_preamble;
read_payload_t rx_segments_data;
};
} // namespace crimson::net
| 5,163 | 22.797235 | 81 | h |
null | ceph-main/src/crimson/net/Fwd.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <boost/container/small_vector.hpp>
#include <seastar/core/future.hh>
#include <seastar/core/future-util.hh>
#include <seastar/core/shared_ptr.hh>
#include <seastar/core/sharded.hh>
#include "msg/Connection.h"
#include "msg/MessageRef.h"
#include "msg/msg_types.h"
#include "crimson/common/errorator.h"
#include "crimson/common/local_shared_foreign_ptr.h"
class AuthConnectionMeta;
namespace crimson::net {
using msgr_tag_t = uint8_t;
using stop_t = seastar::stop_iteration;
class Connection;
using ConnectionLRef = seastar::shared_ptr<Connection>;
using ConnectionFRef = seastar::foreign_ptr<ConnectionLRef>;
using ConnectionRef = ::crimson::local_shared_foreign_ptr<ConnectionLRef>;
class Dispatcher;
class ChainedDispatchers;
constexpr std::size_t NUM_DISPATCHERS = 4u;
using dispatchers_t = boost::container::small_vector<Dispatcher*, NUM_DISPATCHERS>;
class Messenger;
using MessengerRef = seastar::shared_ptr<Messenger>;
using MessageFRef = seastar::foreign_ptr<MessageURef>;
} // namespace crimson::net
| 1,446 | 26.301887 | 83 | h |
null | ceph-main/src/crimson/net/Interceptor.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <variant>
#include <seastar/core/sharded.hh>
#include <seastar/core/sleep.hh>
#include "Fwd.h"
#include "msg/async/frames_v2.h"
namespace crimson::net {
enum class custom_bp_t : uint8_t {
BANNER_WRITE = 0,
BANNER_READ,
BANNER_PAYLOAD_READ,
SOCKET_CONNECTING,
SOCKET_ACCEPTED
};
inline const char* get_bp_name(custom_bp_t bp) {
uint8_t index = static_cast<uint8_t>(bp);
static const char *const bp_names[] = {"BANNER_WRITE",
"BANNER_READ",
"BANNER_PAYLOAD_READ",
"SOCKET_CONNECTING",
"SOCKET_ACCEPTED"};
assert(index < std::size(bp_names));
return bp_names[index];
}
enum class bp_type_t {
READ = 0,
WRITE
};
enum class bp_action_t {
CONTINUE = 0,
FAULT,
BLOCK,
STALL
};
class socket_blocker {
std::optional<seastar::abort_source> p_blocked;
std::optional<seastar::abort_source> p_unblocked;
public:
seastar::future<> wait_blocked() {
ceph_assert(!p_blocked);
if (p_unblocked) {
return seastar::make_ready_future<>();
} else {
p_blocked = seastar::abort_source();
return seastar::sleep_abortable(std::chrono::seconds(10),
*p_blocked).then([] {
throw std::runtime_error(
"Timeout (10s) in socket_blocker::wait_blocked()");
}).handle_exception_type([] (const seastar::sleep_aborted& e) {
// wait done!
});
}
}
seastar::future<> block() {
if (p_blocked) {
p_blocked->request_abort();
p_blocked = std::nullopt;
}
ceph_assert(!p_unblocked);
p_unblocked = seastar::abort_source();
return seastar::sleep_abortable(std::chrono::seconds(10),
*p_unblocked).then([] {
ceph_abort("Timeout (10s) in socket_blocker::block()");
}).handle_exception_type([] (const seastar::sleep_aborted& e) {
// wait done!
});
}
void unblock() {
ceph_assert(!p_blocked);
ceph_assert(p_unblocked);
p_unblocked->request_abort();
p_unblocked = std::nullopt;
}
};
struct tag_bp_t {
ceph::msgr::v2::Tag tag;
bp_type_t type;
bool operator==(const tag_bp_t& x) const {
return tag == x.tag && type == x.type;
}
bool operator!=(const tag_bp_t& x) const { return !operator==(x); }
bool operator<(const tag_bp_t& x) const {
return std::tie(tag, type) < std::tie(x.tag, x.type);
}
};
struct Breakpoint {
using var_t = std::variant<custom_bp_t, tag_bp_t>;
var_t bp;
Breakpoint(custom_bp_t bp) : bp(bp) { }
Breakpoint(ceph::msgr::v2::Tag tag, bp_type_t type)
: bp(tag_bp_t{tag, type}) { }
bool operator==(const Breakpoint& x) const { return bp == x.bp; }
bool operator!=(const Breakpoint& x) const { return !operator==(x); }
bool operator==(const custom_bp_t& x) const { return bp == var_t(x); }
bool operator!=(const custom_bp_t& x) const { return !operator==(x); }
bool operator==(const tag_bp_t& x) const { return bp == var_t(x); }
bool operator!=(const tag_bp_t& x) const { return !operator==(x); }
bool operator<(const Breakpoint& x) const { return bp < x.bp; }
};
struct Interceptor {
socket_blocker blocker;
virtual ~Interceptor() {}
virtual void register_conn(ConnectionRef) = 0;
virtual void register_conn_ready(ConnectionRef) = 0;
virtual void register_conn_closed(ConnectionRef) = 0;
virtual void register_conn_replaced(ConnectionRef) = 0;
virtual bp_action_t intercept(ConnectionRef, Breakpoint bp) = 0;
};
} // namespace crimson::net
template<>
struct fmt::formatter<crimson::net::bp_action_t> : fmt::formatter<std::string_view> {
template <typename FormatContext>
auto format(const crimson::net::bp_action_t& action, FormatContext& ctx) const {
static const char *const action_names[] = {"CONTINUE",
"FAULT",
"BLOCK",
"STALL"};
assert(static_cast<size_t>(action) < std::size(action_names));
return formatter<std::string_view>::format(action_names[static_cast<size_t>(action)], ctx);
}
};
template<>
struct fmt::formatter<crimson::net::Breakpoint> : fmt::formatter<std::string_view> {
template <typename FormatContext>
auto format(const crimson::net::Breakpoint& bp, FormatContext& ctx) const {
if (auto custom_bp = std::get_if<crimson::net::custom_bp_t>(&bp.bp)) {
return formatter<std::string_view>::format(crimson::net::get_bp_name(*custom_bp), ctx);
}
auto tag_bp = std::get<crimson::net::tag_bp_t>(bp.bp);
static const char *const tag_names[] = {"NONE",
"HELLO",
"AUTH_REQUEST",
"AUTH_BAD_METHOD",
"AUTH_REPLY_MORE",
"AUTH_REQUEST_MORE",
"AUTH_DONE",
"AUTH_SIGNATURE",
"CLIENT_IDENT",
"SERVER_IDENT",
"IDENT_MISSING_FEATURES",
"SESSION_RECONNECT",
"SESSION_RESET",
"SESSION_RETRY",
"SESSION_RETRY_GLOBAL",
"SESSION_RECONNECT_OK",
"WAIT",
"MESSAGE",
"KEEPALIVE2",
"KEEPALIVE2_ACK",
"ACK"};
assert(static_cast<size_t>(tag_bp.tag) < std::size(tag_names));
return fmt::format_to(ctx.out(), "{}_{}",
tag_names[static_cast<size_t>(tag_bp.tag)],
tag_bp.type == crimson::net::bp_type_t::WRITE ? "WRITE" : "READ");
}
};
| 6,237 | 34.443182 | 95 | h |
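In tests, an Interceptor decides per breakpoint whether a connection should continue, fault, block, or stall. A hedged sketch of a trivial interceptor that injects a fault whenever a MESSAGE frame is about to be written and lets everything else pass; FaultOnMessageWrite is illustrative:

// Sketch (test-only): fault every outgoing MESSAGE frame, continue otherwise.
#include "crimson/net/Interceptor.h"

struct FaultOnMessageWrite final : public crimson::net::Interceptor {
  void register_conn(crimson::net::ConnectionRef) override {}
  void register_conn_ready(crimson::net::ConnectionRef) override {}
  void register_conn_closed(crimson::net::ConnectionRef) override {}
  void register_conn_replaced(crimson::net::ConnectionRef) override {}

  crimson::net::bp_action_t intercept(crimson::net::ConnectionRef,
                                      crimson::net::Breakpoint bp) override {
    using namespace crimson::net;
    if (bp == tag_bp_t{ceph::msgr::v2::Tag::MESSAGE, bp_type_t::WRITE}) {
      return bp_action_t::FAULT;
    }
    return bp_action_t::CONTINUE;
  }
};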
null | ceph-main/src/crimson/net/Messenger.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "Fwd.h"
#include "crimson/common/throttle.h"
#include "msg/Message.h"
#include "msg/Policy.h"
class AuthAuthorizer;
namespace crimson::auth {
class AuthClient;
class AuthServer;
}
namespace crimson::net {
#ifdef UNIT_TESTS_BUILT
class Interceptor;
#endif
using Throttle = crimson::common::Throttle;
using SocketPolicy = ceph::net::Policy<Throttle>;
class Messenger {
public:
Messenger() {}
virtual ~Messenger() {}
virtual const entity_name_t& get_myname() const = 0;
entity_type_t get_mytype() const { return get_myname().type(); }
virtual const entity_addrvec_t &get_myaddrs() const = 0;
entity_addr_t get_myaddr() const { return get_myaddrs().front(); }
virtual void set_myaddrs(const entity_addrvec_t& addrs) = 0;
virtual bool set_addr_unknowns(const entity_addrvec_t &addrs) = 0;
virtual void set_auth_client(crimson::auth::AuthClient *) = 0;
virtual void set_auth_server(crimson::auth::AuthServer *) = 0;
using bind_ertr = crimson::errorator<
crimson::ct_error::address_in_use, // The address (range) is already bound
crimson::ct_error::address_not_available
>;
/// bind to the given address
virtual bind_ertr::future<> bind(const entity_addrvec_t& addr) = 0;
/// start the messenger
virtual seastar::future<> start(const dispatchers_t&) = 0;
/// either return an existing connection to the peer,
/// or a new pending connection
virtual ConnectionRef
connect(const entity_addr_t& peer_addr,
const entity_name_t& peer_name) = 0;
ConnectionRef
connect(const entity_addr_t& peer_addr,
const entity_type_t& peer_type) {
return connect(peer_addr, entity_name_t(peer_type, -1));
}
virtual bool owns_connection(Connection &) const = 0;
// wait for messenger shutdown
virtual seastar::future<> wait() = 0;
// stop dispatching events and messages
virtual void stop() = 0;
virtual bool is_started() const = 0;
  // free internal resources before destruction; must be called after stop(),
  // and must be called if the messenger is bound.
virtual seastar::future<> shutdown() = 0;
virtual void print(std::ostream& out) const = 0;
virtual SocketPolicy get_policy(entity_type_t peer_type) const = 0;
virtual SocketPolicy get_default_policy() const = 0;
virtual void set_default_policy(const SocketPolicy& p) = 0;
virtual void set_policy(entity_type_t peer_type, const SocketPolicy& p) = 0;
virtual void set_policy_throttler(entity_type_t peer_type, Throttle* throttle) = 0;
static MessengerRef
create(const entity_name_t& name,
const std::string& lname,
uint64_t nonce,
bool dispatch_only_on_this_shard);
#ifdef UNIT_TESTS_BUILT
virtual void set_interceptor(Interceptor *) = 0;
#endif
};
inline std::ostream& operator<<(std::ostream& out, const Messenger& msgr) {
out << "[";
msgr.print(out);
out << "]";
return out;
}
} // namespace crimson::net
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::net::Messenger> : fmt::ostream_formatter {};
#endif
| 3,456 | 25.389313 | 87 | h |
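Bringing a messenger up follows a fixed sequence: create(), optionally bind() for a server, then start() with the dispatcher chain; shutdown() must follow stop() before destruction when the messenger was bound. A hedged sketch of a client-side bring-up, where the entity name, logical name and nonce are placeholders:

// Sketch: client-side messenger bring-up (no bind() needed for a client).
// `dispatcher` is any crimson::net::Dispatcher implementation.
#include "crimson/net/Messenger.h"
#include "crimson/net/Dispatcher.h"

seastar::future<crimson::net::MessengerRef>
start_client_messenger(crimson::net::Dispatcher &dispatcher) {
  auto msgr = crimson::net::Messenger::create(
      entity_name_t::CLIENT(-1),  // placeholder entity name
      "client",                   // logical name for debugging
      0,                          // nonce
      true);                      // dispatch_only_on_this_shard
  return msgr->start({&dispatcher}).then([msgr] {
    return msgr;
  });
}

A connection can then be obtained with msgr->connect(peer_addr, CEPH_ENTITY_TYPE_OSD) via the entity_type_t overload shown above.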
null | ceph-main/src/crimson/net/ProtocolV2.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/shared_future.hh>
#include <seastar/core/sleep.hh>
#include "io_handler.h"
namespace crimson::net {
class ProtocolV2 final : public HandshakeListener {
using AuthConnectionMetaRef = seastar::lw_shared_ptr<AuthConnectionMeta>;
public:
ProtocolV2(SocketConnection &,
IOHandler &);
~ProtocolV2() final;
ProtocolV2(const ProtocolV2 &) = delete;
ProtocolV2(ProtocolV2 &&) = delete;
ProtocolV2 &operator=(const ProtocolV2 &) = delete;
ProtocolV2 &operator=(ProtocolV2 &&) = delete;
/**
* as HandshakeListener
*/
private:
seastar::future<> notify_out(
crosscore_t::seq_t cc_seq) final;
seastar::future<> notify_out_fault(
crosscore_t::seq_t cc_seq,
const char *where,
std::exception_ptr,
io_handler_state) final;
seastar::future<> notify_mark_down(
crosscore_t::seq_t cc_seq) final;
/*
* as ProtocolV2 to be called by SocketConnection
*/
public:
void start_connect(const entity_addr_t& peer_addr,
const entity_name_t& peer_name);
void start_accept(SocketFRef&& socket,
const entity_addr_t& peer_addr);
seastar::future<> close_clean_yielded();
#ifdef UNIT_TESTS_BUILT
bool is_closed_clean() const {
return closed_clean;
}
bool is_closed() const {
return state == state_t::CLOSING;
}
#endif
private:
using io_state_t = IOHandler::io_state_t;
seastar::future<> wait_switch_io_shard() {
if (pr_switch_io_shard.has_value()) {
return pr_switch_io_shard->get_shared_future();
} else {
return seastar::now();
}
}
seastar::future<> wait_exit_io() {
if (pr_exit_io.has_value()) {
return pr_exit_io->get_shared_future();
} else {
assert(!need_exit_io);
return seastar::now();
}
}
enum class state_t {
NONE = 0,
ACCEPTING,
SERVER_WAIT,
ESTABLISHING,
CONNECTING,
READY,
STANDBY,
WAIT,
REPLACING,
CLOSING
};
static const char *get_state_name(state_t state) {
const char *const statenames[] = {"NONE",
"ACCEPTING",
"SERVER_WAIT",
"ESTABLISHING",
"CONNECTING",
"READY",
"STANDBY",
"WAIT",
"REPLACING",
"CLOSING"};
return statenames[static_cast<int>(state)];
}
void trigger_state_phase1(state_t new_state);
void trigger_state_phase2(state_t new_state, io_state_t new_io_state);
void trigger_state(state_t new_state, io_state_t new_io_state) {
ceph_assert_always(!pr_switch_io_shard.has_value());
trigger_state_phase1(new_state);
trigger_state_phase2(new_state, new_io_state);
}
template <typename Func, typename T>
void gated_execute(const char *what, T &who, Func &&func) {
gate.dispatch_in_background(what, who, [this, &who, &func] {
if (!execution_done.available()) {
// discard the unready future
gate.dispatch_in_background(
"gated_execute_abandon",
who,
[fut=std::move(execution_done)]() mutable {
return std::move(fut);
}
);
}
seastar::promise<> pr;
execution_done = pr.get_future();
return seastar::futurize_invoke(std::forward<Func>(func)
).finally([pr=std::move(pr)]() mutable {
pr.set_value();
});
});
}
void fault(state_t expected_state,
const char *where,
std::exception_ptr eptr);
void reset_session(bool is_full);
seastar::future<std::tuple<entity_type_t, entity_addr_t>>
banner_exchange(bool is_connect);
enum class next_step_t {
ready,
wait,
none, // protocol should have been aborted or failed
};
// CONNECTING (client)
seastar::future<> handle_auth_reply();
inline seastar::future<> client_auth() {
std::vector<uint32_t> empty;
return client_auth(empty);
}
seastar::future<> client_auth(std::vector<uint32_t> &allowed_methods);
seastar::future<next_step_t> process_wait();
seastar::future<next_step_t> client_connect();
seastar::future<next_step_t> client_reconnect();
void execute_connecting();
// ACCEPTING (server)
seastar::future<> _auth_bad_method(int r);
seastar::future<> _handle_auth_request(bufferlist& auth_payload, bool more);
seastar::future<> server_auth();
bool validate_peer_name(const entity_name_t& peer_name) const;
seastar::future<next_step_t> send_wait();
seastar::future<next_step_t> reuse_connection(ProtocolV2* existing_proto,
bool do_reset=false,
bool reconnect=false,
uint64_t conn_seq=0,
uint64_t msg_seq=0);
seastar::future<next_step_t> handle_existing_connection(SocketConnectionRef existing_conn);
seastar::future<next_step_t> server_connect();
seastar::future<next_step_t> read_reconnect();
seastar::future<next_step_t> send_retry(uint64_t connect_seq);
seastar::future<next_step_t> send_retry_global(uint64_t global_seq);
seastar::future<next_step_t> send_reset(bool full);
seastar::future<next_step_t> server_reconnect();
void execute_accepting();
// CONNECTING/ACCEPTING
seastar::future<> finish_auth();
// ESTABLISHING
void execute_establishing(SocketConnectionRef existing_conn);
// ESTABLISHING/REPLACING (server)
seastar::future<> send_server_ident();
// REPLACING (server)
void trigger_replacing(bool reconnect,
bool do_reset,
FrameAssemblerV2::mover_t &&mover,
AuthConnectionMetaRef&& new_auth_meta,
uint64_t new_peer_global_seq,
// !reconnect
uint64_t new_client_cookie,
entity_name_t new_peer_name,
uint64_t new_conn_features,
uint64_t new_peer_supported_features,
// reconnect
uint64_t new_connect_seq,
uint64_t new_msg_seq);
// READY
void execute_ready();
// STANDBY
void execute_standby();
// WAIT
void execute_wait(bool max_backoff);
// SERVER_WAIT
void execute_server_wait();
// CLOSING
// reentrant
void do_close(bool is_dispatch_reset,
std::optional<std::function<void()>> f_accept_new=std::nullopt);
private:
SocketConnection &conn;
SocketMessenger &messenger;
IOHandler &io_handler;
// asynchronously populated from io_handler
io_handler_state io_states;
crosscore_t crosscore;
bool has_socket = false;
// the socket exists and it is not shutdown
bool is_socket_valid = false;
FrameAssemblerV2Ref frame_assembler;
bool need_notify_out = false;
std::optional<seastar::shared_promise<>> pr_switch_io_shard;
bool need_exit_io = false;
std::optional<seastar::shared_promise<>> pr_exit_io;
AuthConnectionMetaRef auth_meta;
crimson::common::Gated gate;
seastar::shared_promise<> pr_closed_clean;
#ifdef UNIT_TESTS_BUILT
bool closed_clean = false;
#endif
state_t state = state_t::NONE;
uint64_t peer_supported_features = 0;
uint64_t client_cookie = 0;
uint64_t server_cookie = 0;
uint64_t global_seq = 0;
uint64_t peer_global_seq = 0;
uint64_t connect_seq = 0;
seastar::future<> execution_done = seastar::now();
class Timer {
double last_dur_ = 0.0;
const SocketConnection& conn;
std::optional<seastar::abort_source> as;
public:
Timer(SocketConnection& conn) : conn(conn) {}
double last_dur() const { return last_dur_; }
seastar::future<> backoff(double seconds);
void cancel() {
last_dur_ = 0.0;
if (as) {
as->request_abort();
as = std::nullopt;
}
}
};
Timer protocol_timer;
};
struct create_handlers_ret {
std::unique_ptr<ConnectionHandler> io_handler;
std::unique_ptr<ProtocolV2> protocol;
};
inline create_handlers_ret create_handlers(ChainedDispatchers &dispatchers, SocketConnection &conn) {
std::unique_ptr<ConnectionHandler> io_handler = std::make_unique<IOHandler>(dispatchers, conn);
IOHandler &io_handler_concrete = static_cast<IOHandler&>(*io_handler);
auto protocol = std::make_unique<ProtocolV2>(conn, io_handler_concrete);
io_handler_concrete.set_handshake_listener(*protocol);
return {std::move(io_handler), std::move(protocol)};
}
} // namespace crimson::net
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::net::ProtocolV2> : fmt::ostream_formatter {};
#endif
| 8,992 | 27.015576 | 101 | h |
null | ceph-main/src/crimson/net/SocketConnection.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <seastar/core/sharded.hh>
#include "msg/Policy.h"
#include "crimson/common/throttle.h"
#include "crimson/net/Connection.h"
#include "crimson/net/Socket.h"
namespace crimson::net {
class ProtocolV2;
class SocketMessenger;
class SocketConnection;
using SocketConnectionRef = seastar::shared_ptr<SocketConnection>;
#ifdef UNIT_TESTS_BUILT
class Interceptor;
#endif
/**
* ConnectionHandler
*
* The interface class to implement Connection, called by SocketConnection.
*
* The operations must be done in get_shard_id().
*/
class ConnectionHandler {
public:
using clock_t = seastar::lowres_system_clock;
virtual ~ConnectionHandler() = default;
ConnectionHandler(const ConnectionHandler &) = delete;
ConnectionHandler(ConnectionHandler &&) = delete;
ConnectionHandler &operator=(const ConnectionHandler &) = delete;
ConnectionHandler &operator=(ConnectionHandler &&) = delete;
virtual seastar::shard_id get_shard_id() const = 0;
virtual bool is_connected() const = 0;
virtual seastar::future<> send(MessageFRef) = 0;
virtual seastar::future<> send_keepalive() = 0;
virtual clock_t::time_point get_last_keepalive() const = 0;
virtual clock_t::time_point get_last_keepalive_ack() const = 0;
virtual void set_last_keepalive_ack(clock_t::time_point) = 0;
virtual void mark_down() = 0;
protected:
ConnectionHandler() = default;
};
class SocketConnection : public Connection {
/*
* Connection interfaces, public to users
* Working in ConnectionHandler::get_shard_id()
*/
public:
SocketConnection(SocketMessenger& messenger,
ChainedDispatchers& dispatchers);
~SocketConnection() override;
const seastar::shard_id get_shard_id() const override {
return io_handler->get_shard_id();
}
const entity_name_t &get_peer_name() const override {
return peer_name;
}
const entity_addr_t &get_peer_addr() const override {
return peer_addr;
}
const entity_addr_t &get_peer_socket_addr() const override {
return target_addr;
}
uint64_t get_features() const override {
return features;
}
bool is_connected() const override;
seastar::future<> send(MessageURef msg) override;
seastar::future<> send_keepalive() override;
clock_t::time_point get_last_keepalive() const override;
clock_t::time_point get_last_keepalive_ack() const override;
void set_last_keepalive_ack(clock_t::time_point when) override;
void mark_down() override;
bool has_user_private() const override {
return user_private != nullptr;
}
user_private_t &get_user_private() override {
assert(has_user_private());
return *user_private;
}
void set_user_private(std::unique_ptr<user_private_t> new_user_private) override {
assert(!has_user_private());
user_private = std::move(new_user_private);
}
void print(std::ostream& out) const override;
/*
* Public to SocketMessenger
* Working in SocketMessenger::get_shard_id();
*/
public:
/// start a handshake from the client's perspective,
  /// only call when the SocketConnection is first constructed
void start_connect(const entity_addr_t& peer_addr,
const entity_name_t& peer_name);
/// start a handshake from the server's perspective,
  /// only call when the SocketConnection is first constructed
void start_accept(SocketFRef&& socket,
const entity_addr_t& peer_addr);
seastar::future<> close_clean_yielded();
seastar::socket_address get_local_address() const;
seastar::shard_id get_messenger_shard_id() const;
SocketMessenger &get_messenger() const;
ConnectionRef get_local_shared_foreign_from_this();
private:
void set_peer_type(entity_type_t peer_type);
void set_peer_id(int64_t peer_id);
void set_peer_name(entity_name_t name) {
set_peer_type(name.type());
set_peer_id(name.num());
}
void set_features(uint64_t f);
void set_socket(Socket *s);
#ifdef UNIT_TESTS_BUILT
bool is_closed_clean() const override;
bool is_closed() const override;
// peer wins if myaddr > peeraddr
bool peer_wins() const override;
Interceptor *interceptor = nullptr;
#else
// peer wins if myaddr > peeraddr
bool peer_wins() const;
#endif
private:
const seastar::shard_id msgr_sid;
/*
   * Core owner is the messenger core; access from the I/O core may be allowed.
*/
SocketMessenger& messenger;
std::unique_ptr<ProtocolV2> protocol;
Socket *socket = nullptr;
entity_name_t peer_name = {0, entity_name_t::NEW};
entity_addr_t peer_addr;
// which of the peer_addrs we're connecting to (as client)
// or should reconnect to (as peer)
entity_addr_t target_addr;
uint64_t features = 0;
ceph::net::Policy<crimson::common::Throttle> policy;
uint64_t peer_global_id = 0;
/*
* Core owner is I/O core (mutable).
*/
std::unique_ptr<ConnectionHandler> io_handler;
/*
* Core owner is up to the connection user.
*/
std::unique_ptr<user_private_t> user_private;
friend class IOHandler;
friend class ProtocolV2;
friend class FrameAssemblerV2;
};
} // namespace crimson::net
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::net::SocketConnection> : fmt::ostream_formatter {};
#endif
| 5,624 | 23.141631 | 94 | h |
null | ceph-main/src/crimson/net/SocketMessenger.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <map>
#include <set>
#include <vector>
#include <seastar/core/gate.hh>
#include <seastar/core/reactor.hh>
#include <seastar/core/sharded.hh>
#include <seastar/core/shared_future.hh>
#include "crimson/net/chained_dispatchers.h"
#include "Messenger.h"
#include "Socket.h"
#include "SocketConnection.h"
namespace crimson::net {
class ShardedServerSocket;
class SocketMessenger final : public Messenger {
// Messenger public interfaces
public:
SocketMessenger(const entity_name_t& myname,
const std::string& logic_name,
uint32_t nonce,
bool dispatch_only_on_this_shard);
~SocketMessenger() override;
const entity_name_t &get_myname() const override {
return my_name;
}
const entity_addrvec_t &get_myaddrs() const override {
return my_addrs;
}
void set_myaddrs(const entity_addrvec_t& addr) override;
bool set_addr_unknowns(const entity_addrvec_t &addr) override;
void set_auth_client(crimson::auth::AuthClient *ac) override {
assert(seastar::this_shard_id() == sid);
auth_client = ac;
}
void set_auth_server(crimson::auth::AuthServer *as) override {
assert(seastar::this_shard_id() == sid);
auth_server = as;
}
bind_ertr::future<> bind(const entity_addrvec_t& addr) override;
seastar::future<> start(const dispatchers_t& dispatchers) override;
ConnectionRef connect(const entity_addr_t& peer_addr,
const entity_name_t& peer_name) override;
bool owns_connection(Connection &conn) const override {
assert(seastar::this_shard_id() == sid);
return this == &static_cast<SocketConnection&>(conn).get_messenger();
}
// can only wait once
seastar::future<> wait() override {
assert(seastar::this_shard_id() == sid);
return shutdown_promise.get_future();
}
void stop() override {
assert(seastar::this_shard_id() == sid);
dispatchers.clear();
}
bool is_started() const override {
assert(seastar::this_shard_id() == sid);
return !dispatchers.empty();
}
seastar::future<> shutdown() override;
void print(std::ostream& out) const override {
out << get_myname()
<< "(" << logic_name
<< ") " << get_myaddr();
}
SocketPolicy get_policy(entity_type_t peer_type) const override;
SocketPolicy get_default_policy() const override;
void set_default_policy(const SocketPolicy& p) override;
void set_policy(entity_type_t peer_type, const SocketPolicy& p) override;
void set_policy_throttler(entity_type_t peer_type, Throttle* throttle) override;
// SocketMessenger public interfaces
public:
crimson::auth::AuthClient* get_auth_client() const {
assert(seastar::this_shard_id() == sid);
return auth_client;
}
crimson::auth::AuthServer* get_auth_server() const {
assert(seastar::this_shard_id() == sid);
return auth_server;
}
uint32_t get_global_seq(uint32_t old=0);
void learned_addr(const entity_addr_t &peer_addr_for_me,
const SocketConnection& conn);
SocketConnectionRef lookup_conn(const entity_addr_t& addr);
void accept_conn(SocketConnectionRef);
void unaccept_conn(SocketConnectionRef);
void register_conn(SocketConnectionRef);
void unregister_conn(SocketConnectionRef);
void closing_conn(SocketConnectionRef);
void closed_conn(SocketConnectionRef);
seastar::shard_id get_shard_id() const {
return sid;
}
#ifdef UNIT_TESTS_BUILT
void set_interceptor(Interceptor *i) override {
interceptor = i;
}
Interceptor *interceptor = nullptr;
#endif
private:
seastar::future<> accept(SocketFRef &&, const entity_addr_t &);
listen_ertr::future<> do_listen(const entity_addrvec_t& addr);
/// try to bind to the first unused port of given address
bind_ertr::future<> try_bind(const entity_addrvec_t& addr,
uint32_t min_port, uint32_t max_port);
const seastar::shard_id sid;
// Distinguish messengers with meaningful names for debugging
const std::string logic_name;
const uint32_t nonce;
const bool dispatch_only_on_sid;
entity_name_t my_name;
entity_addrvec_t my_addrs;
crimson::auth::AuthClient* auth_client = nullptr;
crimson::auth::AuthServer* auth_server = nullptr;
ShardedServerSocket *listener = nullptr;
ChainedDispatchers dispatchers;
std::map<entity_addr_t, SocketConnectionRef> connections;
std::set<SocketConnectionRef> accepting_conns;
std::vector<SocketConnectionRef> closing_conns;
ceph::net::PolicySet<Throttle> policy_set;
// specifying we haven't learned our addr; set false when we find it.
bool need_addr = true;
uint32_t global_seq = 0;
bool started = false;
seastar::promise<> shutdown_promise;
};
} // namespace crimson::net
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::net::SocketMessenger> : fmt::ostream_formatter {};
#endif
| 5,289 | 26.409326 | 93 | h |
null | ceph-main/src/crimson/net/chained_dispatchers.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/smp.hh>
#include "Fwd.h"
#include "crimson/common/log.h"
namespace crimson::net {
class Dispatcher;
class ChainedDispatchers {
public:
void assign(const dispatchers_t& _dispatchers) {
assert(empty());
assert(!_dispatchers.empty());
dispatchers = _dispatchers;
}
void clear() {
dispatchers.clear();
}
bool empty() const {
return dispatchers.empty();
}
seastar::future<> ms_dispatch(crimson::net::ConnectionRef, MessageRef);
void ms_handle_accept(crimson::net::ConnectionRef conn, seastar::shard_id, bool is_replace);
void ms_handle_connect(crimson::net::ConnectionRef conn, seastar::shard_id);
void ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace);
void ms_handle_remote_reset(crimson::net::ConnectionRef conn);
private:
dispatchers_t dispatchers;
};
}
| 960 | 23.641026 | 94 | h |
null | ceph-main/src/crimson/net/io_handler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/shared_future.hh>
#include <seastar/util/later.hh>
#include "crimson/common/gated.h"
#include "Fwd.h"
#include "SocketConnection.h"
#include "FrameAssemblerV2.h"
namespace crimson::net {
/**
* crosscore_t
*
* To preserve the event order across cores.
*/
class crosscore_t {
public:
using seq_t = uint64_t;
crosscore_t() = default;
~crosscore_t() = default;
seq_t get_in_seq() const {
return in_seq;
}
seq_t prepare_submit() {
++out_seq;
return out_seq;
}
bool proceed_or_wait(seq_t seq) {
if (seq == in_seq + 1) {
++in_seq;
if (unlikely(in_pr_wait.has_value())) {
in_pr_wait->set_value();
in_pr_wait = std::nullopt;
}
return true;
} else {
return false;
}
}
seastar::future<> wait(seq_t seq) {
assert(seq != in_seq + 1);
if (!in_pr_wait.has_value()) {
in_pr_wait = seastar::shared_promise<>();
}
return in_pr_wait->get_shared_future();
}
private:
seq_t out_seq = 0;
seq_t in_seq = 0;
std::optional<seastar::shared_promise<>> in_pr_wait;
};
/**
* io_handler_state
*
* It is required to populate the states from IOHandler to ProtocolV2
* asynchronously.
*/
struct io_handler_state {
seq_num_t in_seq;
bool is_out_queued;
bool has_out_sent;
bool is_out_queued_or_sent() const {
return is_out_queued || has_out_sent;
}
/*
   * should be consistent with the corresponding interfaces in IOHandler
*/
void reset_session(bool full) {
in_seq = 0;
if (full) {
is_out_queued = false;
has_out_sent = false;
}
}
void reset_peer_state() {
in_seq = 0;
is_out_queued = is_out_queued_or_sent();
has_out_sent = false;
}
void requeue_out_sent_up_to() {
// noop since the information is insufficient
}
void requeue_out_sent() {
if (has_out_sent) {
has_out_sent = false;
is_out_queued = true;
}
}
};
/**
* HandshakeListener
*
* The interface class for IOHandler to notify the ProtocolV2.
*
* The notifications may be cross-core and must be sent to
* SocketConnection::get_messenger_shard_id()
*/
class HandshakeListener {
public:
virtual ~HandshakeListener() = default;
HandshakeListener(const HandshakeListener&) = delete;
HandshakeListener(HandshakeListener &&) = delete;
HandshakeListener &operator=(const HandshakeListener &) = delete;
HandshakeListener &operator=(HandshakeListener &&) = delete;
virtual seastar::future<> notify_out(
crosscore_t::seq_t cc_seq) = 0;
virtual seastar::future<> notify_out_fault(
crosscore_t::seq_t cc_seq,
const char *where,
std::exception_ptr,
io_handler_state) = 0;
virtual seastar::future<> notify_mark_down(
crosscore_t::seq_t cc_seq) = 0;
protected:
HandshakeListener() = default;
};
/**
* IOHandler
*
 * Implements the message read and write paths after the handshake, and is also
 * responsible for dispatching events. It is supposed to work on the same
 * core as the underlying socket and the FrameAssemblerV2 class.
*/
class IOHandler final : public ConnectionHandler {
public:
IOHandler(ChainedDispatchers &,
SocketConnection &);
~IOHandler() final;
IOHandler(const IOHandler &) = delete;
IOHandler(IOHandler &&) = delete;
IOHandler &operator=(const IOHandler &) = delete;
IOHandler &operator=(IOHandler &&) = delete;
/*
* as ConnectionHandler
*/
public:
seastar::shard_id get_shard_id() const final {
return shard_states->get_shard_id();
}
bool is_connected() const final {
ceph_assert_always(seastar::this_shard_id() == get_shard_id());
return protocol_is_connected;
}
seastar::future<> send(MessageFRef msg) final;
seastar::future<> send_keepalive() final;
clock_t::time_point get_last_keepalive() const final {
ceph_assert_always(seastar::this_shard_id() == get_shard_id());
return last_keepalive;
}
clock_t::time_point get_last_keepalive_ack() const final {
ceph_assert_always(seastar::this_shard_id() == get_shard_id());
return last_keepalive_ack;
}
void set_last_keepalive_ack(clock_t::time_point when) final {
ceph_assert_always(seastar::this_shard_id() == get_shard_id());
last_keepalive_ack = when;
}
void mark_down() final;
/*
* as IOHandler to be called by ProtocolV2 handshake
*
* The calls may be cross-core and asynchronous
*/
public:
/*
* should not be called cross-core
*/
void set_handshake_listener(HandshakeListener &hl) {
assert(seastar::this_shard_id() == get_shard_id());
ceph_assert_always(handshake_listener == nullptr);
handshake_listener = &hl;
}
io_handler_state get_states() const {
// might be called from prv_sid during wait_io_exit_dispatching()
return {in_seq, is_out_queued(), has_out_sent()};
}
struct io_stat_printer {
const IOHandler &io_handler;
};
void print_io_stat(std::ostream &out) const;
seastar::future<> set_accepted_sid(
crosscore_t::seq_t cc_seq,
seastar::shard_id sid,
ConnectionFRef conn_fref);
/*
* may be called cross-core
*/
seastar::future<> close_io(
crosscore_t::seq_t cc_seq,
bool is_dispatch_reset,
bool is_replace);
/**
* io_state_t
*
* The io_state is changed with the protocol state, to control the
* io behavior accordingly.
*/
enum class io_state_t : uint8_t {
none, // no IO is possible as the connection is not available to the user yet.
delay, // IO is delayed until open.
open, // Dispatch In and Out concurrently.
drop, // Drop IO as the connection is closed.
switched // IO is switched to a different core
// (is moved to maybe_prv_shard_states)
};
friend class fmt::formatter<io_state_t>;
seastar::future<> set_io_state(
crosscore_t::seq_t cc_seq,
io_state_t new_state,
FrameAssemblerV2Ref fa,
bool set_notify_out);
struct exit_dispatching_ret {
FrameAssemblerV2Ref frame_assembler;
io_handler_state io_states;
};
seastar::future<exit_dispatching_ret>
wait_io_exit_dispatching(
crosscore_t::seq_t cc_seq);
seastar::future<> reset_session(
crosscore_t::seq_t cc_seq,
bool full);
seastar::future<> reset_peer_state(
crosscore_t::seq_t cc_seq);
seastar::future<> requeue_out_sent_up_to(
crosscore_t::seq_t cc_seq,
seq_num_t msg_seq);
seastar::future<> requeue_out_sent(
crosscore_t::seq_t cc_seq);
seastar::future<> dispatch_accept(
crosscore_t::seq_t cc_seq,
seastar::shard_id new_sid,
ConnectionFRef,
bool is_replace);
seastar::future<> dispatch_connect(
crosscore_t::seq_t cc_seq,
seastar::shard_id new_sid,
ConnectionFRef);
private:
class shard_states_t;
using shard_states_ref_t = std::unique_ptr<shard_states_t>;
class shard_states_t {
public:
shard_states_t(seastar::shard_id _sid, io_state_t state)
: sid{_sid}, io_state{state} {}
seastar::shard_id get_shard_id() const {
return sid;
}
io_state_t get_io_state() const {
assert(seastar::this_shard_id() == sid);
return io_state;
}
void set_io_state(io_state_t new_state) {
assert(seastar::this_shard_id() == sid);
assert(io_state != new_state);
pr_io_state_changed.set_value();
pr_io_state_changed = seastar::promise<>();
if (io_state == io_state_t::open) {
// from open
if (out_dispatching) {
ceph_assert_always(!out_exit_dispatching.has_value());
out_exit_dispatching = seastar::promise<>();
}
}
io_state = new_state;
}
seastar::future<> wait_state_change() {
assert(seastar::this_shard_id() == sid);
return pr_io_state_changed.get_future();
}
template <typename Func>
void dispatch_in_background(
const char *what, SocketConnection &who, Func &&func) {
assert(seastar::this_shard_id() == sid);
ceph_assert_always(!gate.is_closed());
gate.dispatch_in_background(what, who, std::move(func));
}
void enter_in_dispatching() {
assert(seastar::this_shard_id() == sid);
assert(io_state == io_state_t::open);
ceph_assert_always(!in_exit_dispatching.has_value());
in_exit_dispatching = seastar::promise<>();
}
void exit_in_dispatching() {
assert(seastar::this_shard_id() == sid);
assert(io_state != io_state_t::open);
ceph_assert_always(in_exit_dispatching.has_value());
in_exit_dispatching->set_value();
in_exit_dispatching = std::nullopt;
}
bool try_enter_out_dispatching() {
assert(seastar::this_shard_id() == sid);
if (out_dispatching) {
// already dispatching out
return false;
}
switch (io_state) {
case io_state_t::open:
[[fallthrough]];
case io_state_t::delay:
out_dispatching = true;
return true;
case io_state_t::drop:
[[fallthrough]];
case io_state_t::switched:
// do not dispatch out
return false;
default:
ceph_abort("impossible");
}
}
void notify_out_dispatching_stopped(
const char *what, SocketConnection &conn);
void exit_out_dispatching(
const char *what, SocketConnection &conn) {
assert(seastar::this_shard_id() == sid);
ceph_assert_always(out_dispatching);
out_dispatching = false;
notify_out_dispatching_stopped(what, conn);
}
seastar::future<> wait_io_exit_dispatching();
seastar::future<> close() {
assert(seastar::this_shard_id() == sid);
assert(!gate.is_closed());
return gate.close();
}
bool assert_closed_and_exit() const {
assert(seastar::this_shard_id() == sid);
if (gate.is_closed()) {
ceph_assert_always(io_state == io_state_t::drop ||
io_state == io_state_t::switched);
ceph_assert_always(!out_dispatching);
ceph_assert_always(!out_exit_dispatching);
ceph_assert_always(!in_exit_dispatching);
return true;
} else {
return false;
}
}
static shard_states_ref_t create(
seastar::shard_id sid, io_state_t state) {
return std::make_unique<shard_states_t>(sid, state);
}
static shard_states_ref_t create_from_previous(
shard_states_t &prv_states, seastar::shard_id new_sid);
private:
const seastar::shard_id sid;
io_state_t io_state;
crimson::common::Gated gate;
seastar::promise<> pr_io_state_changed;
bool out_dispatching = false;
std::optional<seastar::promise<>> out_exit_dispatching;
std::optional<seastar::promise<>> in_exit_dispatching;
};
void do_set_io_state(
io_state_t new_state,
std::optional<crosscore_t::seq_t> cc_seq = std::nullopt,
FrameAssemblerV2Ref fa = nullptr,
bool set_notify_out = false);
io_state_t get_io_state() const {
return shard_states->get_io_state();
}
void do_requeue_out_sent();
void do_requeue_out_sent_up_to(seq_num_t seq);
void assign_frame_assembler(FrameAssemblerV2Ref);
seastar::future<> send_redirected(MessageFRef msg);
seastar::future<> do_send(MessageFRef msg);
seastar::future<> send_keepalive_redirected();
seastar::future<> do_send_keepalive();
seastar::future<> to_new_sid(
seastar::shard_id new_sid, ConnectionFRef);
void dispatch_reset(bool is_replace);
void dispatch_remote_reset();
bool is_out_queued() const {
return (!out_pending_msgs.empty() ||
ack_left > 0 ||
need_keepalive ||
next_keepalive_ack.has_value());
}
bool has_out_sent() const {
return !out_sent_msgs.empty();
}
void reset_in();
void reset_out();
void discard_out_sent();
seastar::future<> do_out_dispatch(shard_states_t &ctx);
ceph::bufferlist sweep_out_pending_msgs_to_sent(
bool require_keepalive,
std::optional<utime_t> maybe_keepalive_ack,
bool require_ack);
void maybe_notify_out_dispatch();
void notify_out_dispatch();
void ack_out_sent(seq_num_t seq);
seastar::future<> read_message(
shard_states_t &ctx,
utime_t throttle_stamp,
std::size_t msg_size);
void do_in_dispatch();
seastar::future<> cleanup_prv_shard(seastar::shard_id prv_sid);
private:
shard_states_ref_t shard_states;
crosscore_t crosscore;
// drop was happening in the previous sid
std::optional<seastar::shard_id> maybe_dropped_sid;
// the remaining states in the previous sid for cleanup, see to_new_sid()
shard_states_ref_t maybe_prv_shard_states;
ChainedDispatchers &dispatchers;
SocketConnection &conn;
// core local reference for dispatching, valid until reset/close
ConnectionRef conn_ref;
HandshakeListener *handshake_listener = nullptr;
FrameAssemblerV2Ref frame_assembler;
bool protocol_is_connected = false;
bool need_dispatch_reset = true;
/*
* out states for writing
*/
/// the seq num of the last transmitted message
seq_num_t out_seq = 0;
// messages to be resent after connection gets reset
std::deque<MessageFRef> out_pending_msgs;
// messages sent, but not yet acked by peer
std::deque<MessageFRef> out_sent_msgs;
bool need_keepalive = false;
std::optional<utime_t> next_keepalive_ack = std::nullopt;
uint64_t ack_left = 0;
bool need_notify_out = false;
/*
* in states for reading
*/
/// the seq num of the last received message
seq_num_t in_seq = 0;
clock_t::time_point last_keepalive;
clock_t::time_point last_keepalive_ack;
};
inline std::ostream& operator<<(
std::ostream& out, IOHandler::io_stat_printer stat) {
stat.io_handler.print_io_stat(out);
return out;
}
} // namespace crimson::net
template <>
struct fmt::formatter<crimson::net::io_handler_state> {
constexpr auto parse(format_parse_context& ctx) {
return ctx.begin();
}
template <typename FormatContext>
auto format(crimson::net::io_handler_state state, FormatContext& ctx) {
return fmt::format_to(
ctx.out(),
"io(in_seq={}, is_out_queued={}, has_out_sent={})",
state.in_seq,
state.is_out_queued,
state.has_out_sent);
}
};
template <>
struct fmt::formatter<crimson::net::IOHandler::io_state_t>
: fmt::formatter<std::string_view> {
template <typename FormatContext>
auto format(crimson::net::IOHandler::io_state_t state, FormatContext& ctx) {
using enum crimson::net::IOHandler::io_state_t;
std::string_view name;
switch (state) {
case none:
name = "none";
break;
case delay:
name = "delay";
break;
case open:
name = "open";
break;
case drop:
name = "drop";
break;
case switched:
name = "switched";
break;
}
return formatter<string_view>::format(name, ctx);
}
};
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::net::IOHandler::io_stat_printer> : fmt::ostream_formatter {};
#endif
| 15,166 | 23.863934 | 104 | h |
null | ceph-main/src/crimson/os/futurized_collection.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "osd/osd_types.h"
namespace crimson::os {
class FuturizedStore;
class FuturizedCollection
: public boost::intrusive_ref_counter<FuturizedCollection,
boost::thread_safe_counter>
{
public:
FuturizedCollection(const coll_t& cid)
: cid{cid} {}
virtual ~FuturizedCollection() {}
virtual seastar::future<> flush() {
return seastar::make_ready_future<>();
}
virtual seastar::future<bool> flush_commit() {
return seastar::make_ready_future<bool>(true);
}
const coll_t& get_cid() const {
return cid;
}
private:
const coll_t cid;
};
using CollectionRef = boost::intrusive_ptr<FuturizedCollection>;
}
| 915 | 23.105263 | 70 | h |
null | ceph-main/src/crimson/os/futurized_store.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string>
#include <map>
#include <optional>
#include <vector>
#include <seastar/core/future.hh>
#include "os/Transaction.h"
#include "crimson/common/smp_helpers.h"
#include "crimson/common/smp_helpers.h"
#include "crimson/osd/exceptions.h"
#include "include/buffer_fwd.h"
#include "include/uuid.h"
#include "osd/osd_types.h"
namespace ceph::os {
class Transaction;
}
namespace crimson::os {
class FuturizedCollection;
class FuturizedStore {
public:
class Shard {
public:
Shard() = default;
virtual ~Shard() = default;
// no copying
explicit Shard(const Shard& o) = delete;
const Shard& operator=(const Shard& o) = delete;
using CollectionRef = boost::intrusive_ptr<FuturizedCollection>;
using read_errorator = crimson::errorator<crimson::ct_error::enoent,
crimson::ct_error::input_output_error>;
virtual read_errorator::future<ceph::bufferlist> read(
CollectionRef c,
const ghobject_t& oid,
uint64_t offset,
size_t len,
uint32_t op_flags = 0) = 0;
virtual read_errorator::future<ceph::bufferlist> readv(
CollectionRef c,
const ghobject_t& oid,
interval_set<uint64_t>& m,
uint32_t op_flags = 0) = 0;
using get_attr_errorator = crimson::errorator<
crimson::ct_error::enoent,
crimson::ct_error::enodata>;
virtual get_attr_errorator::future<ceph::bufferlist> get_attr(
CollectionRef c,
const ghobject_t& oid,
std::string_view name) const = 0;
using get_attrs_ertr = crimson::errorator<
crimson::ct_error::enoent>;
using attrs_t = std::map<std::string, ceph::bufferlist, std::less<>>;
virtual get_attrs_ertr::future<attrs_t> get_attrs(
CollectionRef c,
const ghobject_t& oid) = 0;
virtual seastar::future<struct stat> stat(
CollectionRef c,
const ghobject_t& oid) = 0;
using omap_values_t = std::map<std::string, ceph::bufferlist, std::less<>>;
using omap_keys_t = std::set<std::string>;
virtual read_errorator::future<omap_values_t> omap_get_values(
CollectionRef c,
const ghobject_t& oid,
const omap_keys_t& keys) = 0;
virtual read_errorator::future<std::tuple<bool, omap_values_t>> omap_get_values(
CollectionRef c, ///< [in] collection
const ghobject_t &oid, ///< [in] oid
const std::optional<std::string> &start ///< [in] start, empty for begin
) = 0; ///< @return <done, values> values.empty() only if done
virtual get_attr_errorator::future<bufferlist> omap_get_header(
CollectionRef c,
const ghobject_t& oid) = 0;
virtual seastar::future<std::tuple<std::vector<ghobject_t>, ghobject_t>> list_objects(
CollectionRef c,
const ghobject_t& start,
const ghobject_t& end,
uint64_t limit) const = 0;
virtual seastar::future<CollectionRef> create_new_collection(const coll_t& cid) = 0;
virtual seastar::future<CollectionRef> open_collection(const coll_t& cid) = 0;
protected:
virtual seastar::future<> do_transaction_no_callbacks(
CollectionRef ch,
ceph::os::Transaction&& txn) = 0;
public:
seastar::future<> do_transaction(
CollectionRef ch,
ceph::os::Transaction&& txn) {
std::unique_ptr<Context> on_commit(
ceph::os::Transaction::collect_all_contexts(txn));
return do_transaction_no_callbacks(
std::move(ch), std::move(txn)
).then([on_commit=std::move(on_commit)]() mutable {
auto c = on_commit.release();
if (c) c->complete(0);
return seastar::now();
});
}
/**
* flush
*
* Flushes outstanding transactions on ch, returned future resolves
* after any previously submitted transactions on ch have committed.
*
* @param ch [in] collection on which to flush
*/
virtual seastar::future<> flush(CollectionRef ch) {
return do_transaction(ch, ceph::os::Transaction{});
}
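    /*
     * Usage sketch (illustrative; `shard` and `ch` are assumed to be a
     * FuturizedStore::Shard and an open collection): since transactions on a
     * collection are ordered, the empty transaction submitted by flush() acts
     * as a commit barrier for everything queued on `ch` before it.
     *
     *   return shard.flush(ch).then([] {
     *     // all transactions submitted on ch before this point have committed
     *   });
     */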
// error injection
virtual seastar::future<> inject_data_error(const ghobject_t& o) {
return seastar::now();
}
virtual seastar::future<> inject_mdata_error(const ghobject_t& o) {
return seastar::now();
}
virtual read_errorator::future<std::map<uint64_t, uint64_t>> fiemap(
CollectionRef ch,
const ghobject_t& oid,
uint64_t off,
uint64_t len) = 0;
virtual unsigned get_max_attr_name_length() const = 0;
};
public:
static std::unique_ptr<FuturizedStore> create(const std::string& type,
const std::string& data,
const ConfigValues& values);
FuturizedStore()
: primary_core(seastar::this_shard_id())
{}
virtual ~FuturizedStore() = default;
// no copying
explicit FuturizedStore(const FuturizedStore& o) = delete;
const FuturizedStore& operator=(const FuturizedStore& o) = delete;
virtual seastar::future<> start() = 0;
virtual seastar::future<> stop() = 0;
using mount_ertr = crimson::errorator<crimson::stateful_ec>;
virtual mount_ertr::future<> mount() = 0;
virtual seastar::future<> umount() = 0;
using mkfs_ertr = crimson::errorator<crimson::stateful_ec>;
virtual mkfs_ertr::future<> mkfs(uuid_d new_osd_fsid) = 0;
virtual seastar::future<store_statfs_t> stat() const = 0;
virtual uuid_d get_fsid() const = 0;
virtual seastar::future<> write_meta(const std::string& key,
const std::string& value) = 0;
  // called on the shard to get this shard's FuturizedStore::Shard
virtual Shard& get_sharded_store() = 0;
virtual seastar::future<std::tuple<int, std::string>> read_meta(
const std::string& key) = 0;
using coll_core_t = std::pair<coll_t, core_id_t>;
virtual seastar::future<std::vector<coll_core_t>> list_collections() = 0;
protected:
const core_id_t primary_core;
};
}
| 5,948 | 29.352041 | 90 | h |
null | ceph-main/src/crimson/os/alienstore/alien_collection.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "os/ObjectStore.h"
#include "crimson/os/futurized_collection.h"
#include "crimson/os/futurized_store.h"
#include "alien_store.h"
namespace crimson::os {
class AlienCollection final : public FuturizedCollection {
public:
AlienCollection(ObjectStore::CollectionHandle ch)
: FuturizedCollection(ch->cid),
collection(ch) {}
~AlienCollection() {}
template <typename Func, typename Result = std::invoke_result_t<Func>>
seastar::futurize_t<Result> with_lock(Func&& func) {
// newer versions of Seastar provide two variants of `with_lock`
// - generic, friendly towards throwing move constructors of Func,
// - specialized for `noexcept`.
// unfortunately, the former has a limitation: the return value
// of `Func` must be compatible with `current_exception_as_future()`
// which boils down to returning `seastar::future<void>`.
static_assert(std::is_nothrow_move_constructible_v<Func>);
return seastar::with_lock(mutex, std::forward<Func>(func));
}
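  /*
   * Usage sketch (illustrative; `coll` is an AlienCollection ref and
   * do_alien_work() is a hypothetical helper returning a seastar future).
   * Capturing pointers/references keeps the lambda nothrow-move-constructible,
   * which satisfies the static_assert above.
   *
   *   return coll->with_lock([this, &txn] {
   *     // serialized against other with_lock() users of this collection
   *     return do_alien_work(txn);
   *   });
   */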
private:
ObjectStore::CollectionHandle collection;
seastar::shared_mutex mutex;
friend AlienStore;
};
}
| 1,233 | 29.85 | 72 | h |
null | ceph-main/src/crimson/os/alienstore/alien_store.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <seastar/core/future.hh>
#include <seastar/core/shared_mutex.hh>
#include "common/ceph_context.h"
#include "os/ObjectStore.h"
#include "osd/osd_types.h"
#include "crimson/os/alienstore/thread_pool.h"
#include "crimson/os/futurized_collection.h"
#include "crimson/os/futurized_store.h"
namespace ceph::os {
class Transaction;
}
namespace crimson::os {
using coll_core_t = FuturizedStore::coll_core_t;
class AlienStore final : public FuturizedStore,
public FuturizedStore::Shard {
public:
AlienStore(const std::string& type,
const std::string& path,
const ConfigValues& values);
~AlienStore() final;
seastar::future<> start() final;
seastar::future<> stop() final;
mount_ertr::future<> mount() final;
seastar::future<> umount() final;
mkfs_ertr::future<> mkfs(uuid_d new_osd_fsid) final;
read_errorator::future<ceph::bufferlist> read(CollectionRef c,
const ghobject_t& oid,
uint64_t offset,
size_t len,
uint32_t op_flags = 0) final;
read_errorator::future<ceph::bufferlist> readv(CollectionRef c,
const ghobject_t& oid,
interval_set<uint64_t>& m,
uint32_t op_flags = 0) final;
get_attr_errorator::future<ceph::bufferlist> get_attr(CollectionRef c,
const ghobject_t& oid,
std::string_view name) const final;
get_attrs_ertr::future<attrs_t> get_attrs(CollectionRef c,
const ghobject_t& oid) final;
read_errorator::future<omap_values_t> omap_get_values(
CollectionRef c,
const ghobject_t& oid,
const omap_keys_t& keys) final;
/// Retrieves paged set of values > start (if present)
read_errorator::future<std::tuple<bool, omap_values_t>> omap_get_values(
CollectionRef c, ///< [in] collection
const ghobject_t &oid, ///< [in] oid
const std::optional<std::string> &start ///< [in] start, empty for begin
) final; ///< @return <done, values> values.empty() iff done
seastar::future<std::tuple<std::vector<ghobject_t>, ghobject_t>> list_objects(
CollectionRef c,
const ghobject_t& start,
const ghobject_t& end,
uint64_t limit) const final;
seastar::future<CollectionRef> create_new_collection(const coll_t& cid) final;
seastar::future<CollectionRef> open_collection(const coll_t& cid) final;
seastar::future<std::vector<coll_core_t>> list_collections() final;
seastar::future<> do_transaction_no_callbacks(
CollectionRef c,
ceph::os::Transaction&& txn) final;
// error injection
seastar::future<> inject_data_error(const ghobject_t& o) final;
seastar::future<> inject_mdata_error(const ghobject_t& o) final;
seastar::future<> write_meta(const std::string& key,
const std::string& value) final;
seastar::future<std::tuple<int, std::string>> read_meta(
const std::string& key) final;
uuid_d get_fsid() const final;
seastar::future<store_statfs_t> stat() const final;
unsigned get_max_attr_name_length() const final;
seastar::future<struct stat> stat(
CollectionRef,
const ghobject_t&) final;
get_attr_errorator::future<ceph::bufferlist> omap_get_header(
CollectionRef,
const ghobject_t&) final;
read_errorator::future<std::map<uint64_t, uint64_t>> fiemap(
CollectionRef,
const ghobject_t&,
uint64_t off,
uint64_t len) final;
FuturizedStore::Shard& get_sharded_store() final {
return *this;
}
private:
template <class... Args>
auto do_with_op_gate(Args&&... args) const {
return seastar::with_gate(op_gate,
// perfect forwarding in lambda's closure isn't available in C++17
// using tuple as workaround; see: https://stackoverflow.com/a/49902823
[args = std::make_tuple(std::forward<Args>(args)...)] () mutable {
return std::apply([] (auto&&... args) {
return seastar::do_with(std::forward<decltype(args)>(args)...);
}, std::move(args));
});
}
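  /*
   * Usage sketch (illustrative): the trailing argument is the functor handed
   * to seastar::do_with(); the preceding arguments are the owned states, all
   * kept alive under op_gate until the returned future resolves.
   *
   *   return do_with_op_gate(omap_values_t(), [this](auto &values) {
   *     return tp->submit([&values] {
   *       // populate values from the alien ObjectStore thread
   *     }).then([&values] {
   *       return seastar::make_ready_future<omap_values_t>(std::move(values));
   *     });
   *   });
   */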
// number of cores that are PREVENTED from being scheduled
// to run alien store threads.
static constexpr int N_CORES_FOR_SEASTAR = 3;
mutable std::unique_ptr<crimson::os::ThreadPool> tp;
const std::string type;
const std::string path;
const ConfigValues values;
uint64_t used_bytes = 0;
std::unique_ptr<ObjectStore> store;
std::unique_ptr<CephContext> cct;
mutable seastar::gate op_gate;
std::unordered_map<coll_t, CollectionRef> coll_map;
};
}
| 4,743 | 34.402985 | 80 | h |
null | ceph-main/src/crimson/os/alienstore/semaphore.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <semaphore.h>
#include <ctime>
#include <cerrno>
#include <exception>
#include <chrono>
namespace crimson {
// an implementation of std::counting_semaphore<> in C++17 using the POSIX
// semaphore.
//
// LeastMaxValue is ignored, as we don't have different backends optimized
// for different LeastMaxValues
template<unsigned LeastMaxValue = 64>
class counting_semaphore {
using clock_t = std::chrono::system_clock;
public:
explicit counting_semaphore(unsigned count) noexcept {
sem_init(&sem, 0, count);
}
counting_semaphore(const counting_semaphore&) = delete;
counting_semaphore& operator=(const counting_semaphore&) = delete;
~counting_semaphore() {
sem_destroy(&sem);
}
void acquire() noexcept {
for (;;) {
int err = sem_wait(&sem);
if (err != 0) {
if (errno == EINTR) {
continue;
} else {
std::terminate();
}
} else {
break;
}
}
}
void release(unsigned update = 1) {
for (; update != 0; --update) {
int err = sem_post(&sem);
if (err != 0) {
std::terminate();
}
}
}
template<typename Clock, typename Duration>
bool try_acquire_until(const std::chrono::time_point<Clock, Duration>& abs_time) noexcept {
auto s = std::chrono::time_point_cast<std::chrono::seconds>(abs_time);
auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(abs_time - s);
struct timespec ts = {
static_cast<std::time_t>(s.time_since_epoch().count()),
static_cast<long>(ns.count())
};
for (;;) {
if (int err = sem_timedwait(&sem, &ts); err) {
if (errno == EINTR) {
continue;
} else if (errno == ETIMEDOUT || errno == EINVAL) {
return false;
} else {
std::terminate();
}
} else {
break;
}
}
return true;
}
template<typename Rep, typename Period>
bool try_acquire_for(const std::chrono::duration<Rep, Period>& rel_time) {
return try_acquire_until(clock_t::now() + rel_time);
}
private:
sem_t sem;
};
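/*
 * Usage sketch (illustrative): a bounded handshake between native threads,
 * mirroring how ShardedWorkQueue in thread_pool.h pairs release() on the
 * producer side with try_acquire_for() on the consumer side.
 *
 *   crimson::counting_semaphore<64> slots{0};
 *
 *   // producer thread: enqueue an item, then
 *   slots.release();
 *
 *   // consumer thread: returns false on timeout
 *   bool consumed = slots.try_acquire_for(std::chrono::milliseconds(10));
 */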
}
| 2,219 | 23.395604 | 93 | h |
null | ceph-main/src/crimson/os/alienstore/thread_pool.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <atomic>
#include <condition_variable>
#include <tuple>
#include <type_traits>
#include <boost/lockfree/queue.hpp>
#include <boost/optional.hpp>
#include <seastar/core/future.hh>
#include <seastar/core/gate.hh>
#include <seastar/core/reactor.hh>
#include <seastar/core/resource.hh>
#include <seastar/core/semaphore.hh>
#include <seastar/core/sharded.hh>
#if __cplusplus > 201703L
#include <semaphore>
namespace crimson {
using std::counting_semaphore;
}
#else
#include "semaphore.h"
#endif
namespace crimson::os {
struct WorkItem {
virtual ~WorkItem() {}
virtual void process() = 0;
};
template<typename Func>
struct Task final : WorkItem {
using T = std::invoke_result_t<Func>;
using future_stored_type_t =
std::conditional_t<std::is_void_v<T>,
seastar::internal::future_stored_type_t<>,
seastar::internal::future_stored_type_t<T>>;
using futurator_t = seastar::futurize<T>;
public:
explicit Task(Func&& f)
: func(std::move(f))
{}
void process() override {
try {
if constexpr (std::is_void_v<T>) {
func();
state.set();
} else {
state.set(func());
}
} catch (...) {
state.set_exception(std::current_exception());
}
on_done.write_side().signal(1);
}
typename futurator_t::type get_future() {
return on_done.wait().then([this](size_t) {
if (state.failed()) {
return futurator_t::make_exception_future(state.get_exception());
} else {
return futurator_t::from_tuple(state.get_value());
}
});
}
private:
Func func;
seastar::future_state<future_stored_type_t> state;
seastar::readable_eventfd on_done;
};
struct SubmitQueue {
seastar::semaphore free_slots;
seastar::gate pending_tasks;
explicit SubmitQueue(size_t num_free_slots)
: free_slots(num_free_slots)
{}
seastar::future<> stop() {
return pending_tasks.close();
}
};
struct ShardedWorkQueue {
public:
WorkItem* pop_front(std::chrono::milliseconds& queue_max_wait) {
if (sem.try_acquire_for(queue_max_wait)) {
if (!is_stopping()) {
WorkItem* work_item = nullptr;
[[maybe_unused]] bool popped = pending.pop(work_item);
assert(popped);
return work_item;
}
}
return nullptr;
}
void stop() {
stopping = true;
sem.release();
}
void push_back(WorkItem* work_item) {
[[maybe_unused]] bool pushed = pending.push(work_item);
assert(pushed);
sem.release();
}
private:
bool is_stopping() const {
return stopping;
}
std::atomic<bool> stopping = false;
static constexpr unsigned QUEUE_SIZE = 128;
crimson::counting_semaphore<QUEUE_SIZE> sem{0};
boost::lockfree::queue<WorkItem*> pending{QUEUE_SIZE};
};
/// an engine for scheduling non-seastar tasks from seastar fibers
class ThreadPool {
public:
/**
   * @param queue_sz the depth of the pending queue. before a task is scheduled,
   *                 it waits in this queue. we will round this number to a
   *                 multiple of the number of cores.
   * @param n_threads the number of threads in this thread pool.
   * @param cpus the CPU cores to which the threads of this pool are pinned
   * @note each @c Task has its own seastar::readable_eventfd, which occupies
   *       an fd, so we should keep the size of the queue under a reasonable limit.
*/
ThreadPool(size_t n_threads, size_t queue_sz, const std::optional<seastar::resource::cpuset>& cpus);
~ThreadPool();
seastar::future<> start();
seastar::future<> stop();
size_t size() {
return n_threads;
}
template<typename Func, typename...Args>
auto submit(int shard, Func&& func, Args&&... args) {
auto packaged = [func=std::move(func),
args=std::forward_as_tuple(args...)] {
return std::apply(std::move(func), std::move(args));
};
return seastar::with_gate(submit_queue.local().pending_tasks,
[packaged=std::move(packaged), shard, this] {
return local_free_slots().wait()
.then([packaged=std::move(packaged), shard, this] {
auto task = new Task{std::move(packaged)};
auto fut = task->get_future();
pending_queues[shard].push_back(task);
return fut.finally([task, this] {
local_free_slots().signal();
delete task;
});
});
});
}
template<typename Func>
auto submit(Func&& func) {
return submit(::rand() % n_threads, std::forward<Func>(func));
}
private:
void loop(std::chrono::milliseconds queue_max_wait, size_t shard);
bool is_stopping() const {
return stopping.load(std::memory_order_relaxed);
}
static void pin(const seastar::resource::cpuset& cpus);
static void block_sighup();
seastar::semaphore& local_free_slots() {
return submit_queue.local().free_slots;
}
ThreadPool(const ThreadPool&) = delete;
ThreadPool& operator=(const ThreadPool&) = delete;
private:
size_t n_threads;
std::atomic<bool> stopping = false;
std::vector<std::thread> threads;
seastar::sharded<SubmitQueue> submit_queue;
const size_t queue_size;
std::vector<ShardedWorkQueue> pending_queues;
};
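/*
 * Usage sketch (illustrative; the sizes and the blocking call are hypothetical):
 * run a blocking function on a worker thread from a seastar fiber.
 *
 *   ThreadPool tp(2, 128, std::nullopt);
 *   return tp.start().then([&tp] {
 *     return tp.submit([] {
 *       return ::access("/var/lib/osd", F_OK); // blocking syscall off-reactor
 *     });
 *   }).then([&tp](int r) {
 *     return tp.stop();
 *   });
 */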
} // namespace crimson::os
| 5,335 | 27.843243 | 102 | h |
null | ceph-main/src/crimson/os/cyanstore/cyan_collection.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string>
#include <unordered_map>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include "include/buffer.h"
#include "osd/osd_types.h"
#include "crimson/os/futurized_collection.h"
namespace crimson::os {
class Object;
/**
* a collection also orders transactions
*
* Any transactions queued under a given collection will be applied in
* sequence. Transactions queued under different collections may run
* in parallel.
*
* ObjectStore users may get collection handles with open_collection() (or,
* for bootstrapping a new collection, create_new_collection()).
*/
struct Collection final : public FuturizedCollection {
using ObjectRef = boost::intrusive_ptr<Object>;
int bits = 0;
// always use bufferlist object for testing
bool use_page_set = false;
std::unordered_map<ghobject_t, ObjectRef> object_hash; ///< for lookup
std::map<ghobject_t, ObjectRef> object_map; ///< for iteration
std::map<std::string,bufferptr> xattr;
bool exists = true;
Collection(const coll_t& c);
~Collection() final;
ObjectRef create_object() const;
ObjectRef get_object(ghobject_t oid);
ObjectRef get_or_create_object(ghobject_t oid);
uint64_t used_bytes() const;
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& p);
};
}
| 1,446 | 26.826923 | 75 | h |
null | ceph-main/src/crimson/os/cyanstore/cyan_object.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cstddef>
#include <map>
#include <string>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include "include/buffer.h"
namespace crimson::os {
struct Object : public boost::intrusive_ref_counter<
Object,
boost::thread_unsafe_counter>
{
using bufferlist = ceph::bufferlist;
bufferlist data;
// use transparent comparator for better performance, see
// https://en.cppreference.com/w/cpp/utility/functional/less_void
std::map<std::string,bufferlist,std::less<>> xattr;
bufferlist omap_header;
std::map<std::string,bufferlist> omap;
typedef boost::intrusive_ptr<Object> Ref;
Object() = default;
// interface for object data
size_t get_size() const;
ceph::bufferlist read(uint64_t offset, uint64_t len);
int write(uint64_t offset, const bufferlist &bl);
int clone(Object *src, uint64_t srcoff, uint64_t len,
uint64_t dstoff);
int truncate(uint64_t offset);
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& p);
};
using ObjectRef = boost::intrusive_ptr<Object>;
}
| 1,198 | 25.065217 | 70 | h |
null | ceph-main/src/crimson/os/cyanstore/cyan_store.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string>
#include <unordered_map>
#include <map>
#include <typeinfo>
#include <vector>
#include <optional>
#include <seastar/core/future.hh>
#include <seastar/core/future-util.hh>
#include "osd/osd_types.h"
#include "include/uuid.h"
#include "crimson/os/cyanstore/cyan_object.h"
#include "crimson/os/cyanstore/cyan_collection.h"
#include "crimson/os/futurized_store.h"
namespace ceph::os {
class Transaction;
}
namespace crimson::os {
class CyanStore final : public FuturizedStore {
class Shard : public FuturizedStore::Shard {
public:
Shard(std::string path)
:path(path){}
seastar::future<struct stat> stat(
CollectionRef c,
const ghobject_t& oid) final;
read_errorator::future<ceph::bufferlist> read(
CollectionRef c,
const ghobject_t& oid,
uint64_t offset,
size_t len,
uint32_t op_flags = 0) final;
read_errorator::future<ceph::bufferlist> readv(
CollectionRef c,
const ghobject_t& oid,
interval_set<uint64_t>& m,
uint32_t op_flags = 0) final;
get_attr_errorator::future<ceph::bufferlist> get_attr(
CollectionRef c,
const ghobject_t& oid,
std::string_view name) const final;
get_attrs_ertr::future<attrs_t> get_attrs(
CollectionRef c,
const ghobject_t& oid) final;
read_errorator::future<omap_values_t> omap_get_values(
CollectionRef c,
const ghobject_t& oid,
const omap_keys_t& keys) final;
read_errorator::future<std::tuple<bool, omap_values_t>> omap_get_values(
CollectionRef c, ///< [in] collection
const ghobject_t &oid, ///< [in] oid
const std::optional<std::string> &start ///< [in] start, empty for begin
) final;
get_attr_errorator::future<ceph::bufferlist> omap_get_header(
CollectionRef c,
const ghobject_t& oid) final;
seastar::future<std::tuple<std::vector<ghobject_t>, ghobject_t>>
list_objects(
CollectionRef c,
const ghobject_t& start,
const ghobject_t& end,
uint64_t limit) const final;
seastar::future<CollectionRef> create_new_collection(const coll_t& cid) final;
seastar::future<CollectionRef> open_collection(const coll_t& cid) final;
seastar::future<> do_transaction_no_callbacks(
CollectionRef ch,
ceph::os::Transaction&& txn) final;
read_errorator::future<std::map<uint64_t, uint64_t>>
fiemap(
CollectionRef c,
const ghobject_t& oid,
uint64_t off,
uint64_t len) final;
unsigned get_max_attr_name_length() const final;
public:
// only exposed to CyanStore
mount_ertr::future<> mount();
seastar::future<> umount();
seastar::future<> mkfs();
mkfs_ertr::future<> mkcoll(uuid_d new_osd_fsid);
using coll_core_t = FuturizedStore::coll_core_t;
seastar::future<std::vector<coll_core_t>> list_collections();
uint64_t get_used_bytes() const { return used_bytes; }
private:
int _remove(const coll_t& cid, const ghobject_t& oid);
int _touch(const coll_t& cid, const ghobject_t& oid);
int _write(const coll_t& cid, const ghobject_t& oid,
uint64_t offset, size_t len, const ceph::bufferlist& bl,
uint32_t fadvise_flags);
int _zero(const coll_t& cid, const ghobject_t& oid,
uint64_t offset, size_t len);
int _omap_clear(
const coll_t& cid,
const ghobject_t& oid);
int _omap_set_values(
const coll_t& cid,
const ghobject_t& oid,
std::map<std::string, ceph::bufferlist> &&aset);
int _omap_set_header(
const coll_t& cid,
const ghobject_t& oid,
const ceph::bufferlist &header);
int _omap_rmkeys(
const coll_t& cid,
const ghobject_t& oid,
const omap_keys_t& aset);
int _omap_rmkeyrange(
const coll_t& cid,
const ghobject_t& oid,
const std::string &first,
const std::string &last);
int _truncate(const coll_t& cid, const ghobject_t& oid, uint64_t size);
int _clone(const coll_t& cid, const ghobject_t& oid,
const ghobject_t& noid);
int _setattrs(const coll_t& cid, const ghobject_t& oid,
std::map<std::string,bufferlist>&& aset);
int _rm_attr(const coll_t& cid, const ghobject_t& oid,
std::string_view name);
int _rm_attrs(const coll_t& cid, const ghobject_t& oid);
int _create_collection(const coll_t& cid, int bits);
boost::intrusive_ptr<Collection> _get_collection(const coll_t& cid);
private:
uint64_t used_bytes = 0;
const std::string path;
std::unordered_map<coll_t, boost::intrusive_ptr<Collection>> coll_map;
std::map<coll_t, boost::intrusive_ptr<Collection>> new_coll_map;
};
public:
CyanStore(const std::string& path);
~CyanStore() final;
seastar::future<> start() final {
ceph_assert(seastar::this_shard_id() == primary_core);
return shard_stores.start(path);
}
seastar::future<> stop() final {
ceph_assert(seastar::this_shard_id() == primary_core);
return shard_stores.stop();
}
mount_ertr::future<> mount() final {
ceph_assert(seastar::this_shard_id() == primary_core);
return shard_stores.invoke_on_all(
[](auto &local_store) {
return local_store.mount().handle_error(
crimson::stateful_ec::handle([](const auto& ec) {
crimson::get_logger(ceph_subsys_cyanstore).error(
"error mounting cyanstore: ({}) {}",
ec.value(), ec.message());
std::exit(EXIT_FAILURE);
}));
});
}
seastar::future<> umount() final {
ceph_assert(seastar::this_shard_id() == primary_core);
return shard_stores.invoke_on_all(
[](auto &local_store) {
return local_store.umount();
});
}
mkfs_ertr::future<> mkfs(uuid_d new_osd_fsid) final;
seastar::future<store_statfs_t> stat() const final;
uuid_d get_fsid() const final;
seastar::future<> write_meta(const std::string& key,
const std::string& value) final;
FuturizedStore::Shard& get_sharded_store() final{
return shard_stores.local();
}
seastar::future<std::tuple<int, std::string>>
read_meta(const std::string& key) final;
seastar::future<std::vector<coll_core_t>> list_collections() final;
private:
seastar::sharded<CyanStore::Shard> shard_stores;
const std::string path;
uuid_d osd_fsid;
};
}
| 6,408 | 28.131818 | 82 | h |
null | ceph-main/src/crimson/os/seastore/backref_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/transaction.h"
namespace crimson::os::seastore {
/**
* Abstract interface for managing back references that map paddr_t to laddr_t
*/
class BackrefManager {
public:
using base_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
using base_iertr = trans_iertr<base_ertr>;
using mkfs_iertr = base_iertr;
using mkfs_ret = mkfs_iertr::future<>;
virtual mkfs_ret mkfs(
Transaction &t) = 0;
/**
   * Fetches mappings for paddr_t in range [offset, end)
*
* Future will not resolve until all pins have resolved
*/
using get_mappings_iertr = base_iertr;
using get_mappings_ret = get_mappings_iertr::future<backref_pin_list_t>;
virtual get_mappings_ret get_mappings(
Transaction &t,
paddr_t offset,
paddr_t end) = 0;
/**
* Fetches the mapping for paddr_t
*
* Future will not resolve until the pin has resolved
*/
using get_mapping_iertr = base_iertr::extend<
crimson::ct_error::enoent>;
using get_mapping_ret = get_mapping_iertr::future<BackrefMappingRef>;
virtual get_mapping_ret get_mapping(
Transaction &t,
paddr_t offset) = 0;
/**
* rewrite_extent
*
* rewrite extent into passed transaction
*/
using rewrite_extent_iertr = base_iertr;
using rewrite_extent_ret = rewrite_extent_iertr::future<>;
virtual rewrite_extent_ret rewrite_extent(
Transaction &t,
CachedExtentRef extent) = 0;
/**
* Insert new paddr_t -> laddr_t mapping
*/
using new_mapping_iertr = base_iertr;
using new_mapping_ret = new_mapping_iertr::future<BackrefMappingRef>;
virtual new_mapping_ret new_mapping(
Transaction &t,
paddr_t key,
extent_len_t len,
laddr_t val,
extent_types_t type) = 0;
/**
* Check if a CachedExtent is alive, should be called
* after replay on each cached extent.
*
* @return returns whether the extent is alive
*/
using init_cached_extent_iertr = base_iertr;
using init_cached_extent_ret = init_cached_extent_iertr::future<bool>;
virtual init_cached_extent_ret init_cached_extent(
Transaction &t,
CachedExtentRef e) = 0;
virtual Cache::backref_entry_query_mset_t
get_cached_backref_entries_in_range(
paddr_t start,
paddr_t end) = 0;
using retrieve_backref_extents_in_range_iertr = base_iertr;
using retrieve_backref_extents_in_range_ret =
retrieve_backref_extents_in_range_iertr::future<std::vector<CachedExtentRef>>;
virtual retrieve_backref_extents_in_range_ret
retrieve_backref_extents_in_range(
Transaction &t,
paddr_t start,
paddr_t end) = 0;
virtual void cache_new_backref_extent(
paddr_t paddr,
paddr_t key,
extent_types_t type) = 0;
/**
* merge in-cache paddr_t -> laddr_t mappings to the on-disk backref tree
*/
using merge_cached_backrefs_iertr = base_iertr;
using merge_cached_backrefs_ret = merge_cached_backrefs_iertr::future<journal_seq_t>;
virtual merge_cached_backrefs_ret merge_cached_backrefs(
Transaction &t,
const journal_seq_t &limit,
const uint64_t max) = 0;
struct remove_mapping_result_t {
paddr_t offset = P_ADDR_NULL;
extent_len_t len = 0;
laddr_t laddr = L_ADDR_NULL;
};
/**
* delete the mapping for paddr_t offset
*/
using remove_mapping_iertr = base_iertr::extend<
crimson::ct_error::enoent>;
using remove_mapping_ret = remove_mapping_iertr::future<remove_mapping_result_t>;
virtual remove_mapping_ret remove_mapping(
Transaction &t,
paddr_t offset) = 0;
using check_child_trackers_ret = base_iertr::future<>;
virtual check_child_trackers_ret check_child_trackers(Transaction &t) = 0;
/**
* scan all extents in both tree and cache,
* including backref extents, logical extents and lba extents,
   * and visit them with scan_mapped_space_func_t
*/
using scan_mapped_space_iertr = base_iertr;
using scan_mapped_space_ret = scan_mapped_space_iertr::future<>;
using scan_mapped_space_func_t = std::function<
void(paddr_t, paddr_t, extent_len_t, extent_types_t, laddr_t)>;
virtual scan_mapped_space_ret scan_mapped_space(
Transaction &t,
scan_mapped_space_func_t &&f) = 0;
virtual ~BackrefManager() {}
};
using BackrefManagerRef =
std::unique_ptr<BackrefManager>;
BackrefManagerRef create_backref_manager(
Cache &cache);
} // namespace crimson::os::seastore
| 4,580 | 28.365385 | 87 | h |
null | ceph-main/src/crimson/os/seastore/collection_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/future.hh>
#include "osd/osd_types.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/transaction_manager.h"
namespace crimson::os::seastore {
struct coll_info_t {
unsigned split_bits;
coll_info_t(unsigned bits)
: split_bits(bits) {}
bool operator==(const coll_info_t &rhs) const {
return split_bits == rhs.split_bits;
}
};
/// Interface for maintaining set of collections
class CollectionManager {
public:
using base_iertr = TransactionManager::read_extent_iertr;
/// Initialize collection manager instance for an empty store
using mkfs_iertr = TransactionManager::alloc_extent_iertr;
using mkfs_ret = mkfs_iertr::future<coll_root_t>;
virtual mkfs_ret mkfs(
Transaction &t) = 0;
/// Create collection
using create_iertr = base_iertr;
using create_ret = create_iertr::future<>;
virtual create_ret create(
coll_root_t &root,
Transaction &t,
coll_t cid,
coll_info_t info
) = 0;
/// List collections with info
using list_iertr = base_iertr;
using list_ret_bare = std::vector<std::pair<coll_t, coll_info_t>>;
using list_ret = list_iertr::future<list_ret_bare>;
virtual list_ret list(
const coll_root_t &root,
Transaction &t) = 0;
/// Remove cid
using remove_iertr = base_iertr;
using remove_ret = remove_iertr::future<>;
virtual remove_ret remove(
const coll_root_t &coll_root,
Transaction &t,
coll_t cid) = 0;
/// Update info for cid
using update_iertr = base_iertr;
using update_ret = base_iertr::future<>;
virtual update_ret update(
const coll_root_t &coll_root,
Transaction &t,
coll_t cid,
coll_info_t info
) = 0;
virtual ~CollectionManager() {}
};
using CollectionManagerRef = std::unique_ptr<CollectionManager>;
namespace collection_manager {
/* create CollectionManager for collections */
CollectionManagerRef create_coll_manager(
TransactionManager &trans_manager);
}
}
| 2,088 | 23.576471 | 70 | h |
null | ceph-main/src/crimson/os/seastore/device.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <memory>
#include "include/buffer_fwd.h"
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/seastore_types.h"
namespace crimson::os::seastore {
using magic_t = uint64_t;
struct device_spec_t {
magic_t magic = 0;
device_type_t dtype = device_type_t::NONE;
device_id_t id = DEVICE_ID_NULL;
DENC(device_spec_t, v, p) {
DENC_START(1, 1, p);
denc(v.magic, p);
denc(v.dtype, p);
denc(v.id, p);
DENC_FINISH(p);
}
};
std::ostream& operator<<(std::ostream&, const device_spec_t&);
using secondary_device_set_t =
std::map<device_id_t, device_spec_t>;
struct device_config_t {
bool major_dev = false;
device_spec_t spec;
seastore_meta_t meta;
secondary_device_set_t secondary_devices;
DENC(device_config_t, v, p) {
DENC_START(1, 1, p);
denc(v.major_dev, p);
denc(v.spec, p);
denc(v.meta, p);
denc(v.secondary_devices, p);
DENC_FINISH(p);
}
static device_config_t create_primary(
uuid_d new_osd_fsid,
device_id_t id,
device_type_t d_type,
secondary_device_set_t sds) {
return device_config_t{
true,
device_spec_t{
(magic_t)std::rand(),
d_type,
id},
seastore_meta_t{new_osd_fsid},
sds};
}
static device_config_t create_secondary(
uuid_d new_osd_fsid,
device_id_t id,
device_type_t d_type,
magic_t magic) {
return device_config_t{
false,
device_spec_t{
magic,
d_type,
id},
seastore_meta_t{new_osd_fsid},
secondary_device_set_t()};
}
};
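/*
 * Usage sketch (illustrative; `fsid`, `id` and `dtype` are assumed to be
 * chosen by the caller, e.g. while running mkfs on the primary device):
 *
 *   auto config = device_config_t::create_primary(
 *     fsid, id, dtype, secondary_device_set_t{});
 *   return device->mkfs(std::move(config));
 */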
std::ostream& operator<<(std::ostream&, const device_config_t&);
class Device;
using DeviceRef = std::unique_ptr<Device>;
/**
* Device
*
* Represents a general device regardless of the underlying medium.
*/
class Device {
// interfaces used by device
public:
virtual ~Device() {}
virtual seastar::future<> start() {
return seastar::now();
}
virtual seastar::future<> stop() {
return seastar::now();
}
// called on the shard to get this shard device;
virtual Device& get_sharded_device() {
return *this;
}
using access_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::permission_denied,
crimson::ct_error::enoent>;
using mkfs_ertr = access_ertr;
using mkfs_ret = mkfs_ertr::future<>;
virtual mkfs_ret mkfs(device_config_t) = 0;
using mount_ertr = access_ertr;
using mount_ret = access_ertr::future<>;
virtual mount_ret mount() = 0;
static seastar::future<DeviceRef> make_device(
const std::string &device,
device_type_t dtype);
// interfaces used by each device shard
public:
virtual device_id_t get_device_id() const = 0;
virtual magic_t get_magic() const = 0;
virtual device_type_t get_device_type() const = 0;
virtual backend_type_t get_backend_type() const = 0;
virtual const seastore_meta_t &get_meta() const = 0;
virtual extent_len_t get_block_size() const = 0;
virtual std::size_t get_available_size() const = 0;
virtual secondary_device_set_t& get_secondary_devices() = 0;
using close_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
virtual close_ertr::future<> close() = 0;
using read_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent,
crimson::ct_error::erange>;
virtual read_ertr::future<> read(
paddr_t addr,
size_t len,
ceph::bufferptr &out) = 0;
read_ertr::future<ceph::bufferptr> read(
paddr_t addr,
size_t len
) {
auto ptrref = std::make_unique<ceph::bufferptr>(
buffer::create_page_aligned(len));
return read(addr, len, *ptrref
).safe_then([ptrref=std::move(ptrref)]() mutable {
return read_ertr::make_ready_future<bufferptr>(std::move(*ptrref));
});
}
};
}
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::device_spec_t)
WRITE_CLASS_DENC(crimson::os::seastore::device_config_t)
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::device_config_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::device_spec_t> : fmt::ostream_formatter {};
#endif
| 4,399 | 24 | 102 | h |
null | ceph-main/src/crimson/os/seastore/journal.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <memory>
#include "crimson/os/seastore/ordering_handle.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/segment_seq_allocator.h"
namespace crimson::os::seastore {
namespace random_block_device {
class RBMDevice;
}
class SegmentManagerGroup;
class SegmentProvider;
class JournalTrimmer;
class Journal {
public:
virtual JournalTrimmer &get_trimmer() = 0;
/**
* initializes journal for mkfs writes -- must run prior to calls
* to submit_record.
*/
using open_for_mkfs_ertr = crimson::errorator<
crimson::ct_error::input_output_error
>;
using open_for_mkfs_ret = open_for_mkfs_ertr::future<journal_seq_t>;
virtual open_for_mkfs_ret open_for_mkfs() = 0;
/**
* initializes journal for new writes -- must run prior to calls
* to submit_record. Should be called after replay if not a new
* Journal.
*/
using open_for_mount_ertr = open_for_mkfs_ertr;
using open_for_mount_ret = open_for_mkfs_ret;
virtual open_for_mount_ret open_for_mount() = 0;
/// close journal
using close_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
virtual close_ertr::future<> close() = 0;
/**
* submit_record
*
* write record with the ordering handle
*/
using submit_record_ertr = crimson::errorator<
crimson::ct_error::erange,
crimson::ct_error::input_output_error
>;
using submit_record_ret = submit_record_ertr::future<
record_locator_t
>;
virtual submit_record_ret submit_record(
record_t &&record,
OrderingHandle &handle
) = 0;
/**
* flush
*
* Wait for all outstanding IOs on handle to commit.
* Note, flush() machinery must go through the same pipeline
* stages and locks as submit_record.
*/
virtual seastar::future<> flush(OrderingHandle &handle) = 0;
/// sets write pipeline reference
virtual void set_write_pipeline(WritePipeline *_write_pipeline) = 0;
/**
* Read deltas and pass to delta_handler
*
* record_block_start (argument to delta_handler) is the start of the
   * first block in the record
*/
using replay_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent,
crimson::ct_error::erange>;
using replay_ret = replay_ertr::future<>;
using delta_handler_t = std::function<
replay_ertr::future<bool>(
const record_locator_t&,
const delta_info_t&,
const journal_seq_t&, // dirty_tail
const journal_seq_t&, // alloc_tail
sea_time_point modify_time)>;
virtual replay_ret replay(
delta_handler_t &&delta_handler) = 0;
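  /*
   * Usage sketch (illustrative): replay() drives the caller-provided handler
   * for every delta found in the journal; the handler reports whether the
   * delta was applied.
   *
   *   return journal->replay(
   *     [](const auto &locator, const auto &delta,
   *        const auto &dirty_tail, const auto &alloc_tail,
   *        auto modify_time) {
   *       // apply the delta to the cached state here
   *       return Journal::replay_ertr::make_ready_future<bool>(true);
   *     });
   */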
virtual seastar::future<> finish_commit(
transaction_type_t type) = 0;
virtual ~Journal() {}
virtual journal_type_t get_type() = 0;
};
using JournalRef = std::unique_ptr<Journal>;
namespace journal {
JournalRef make_segmented(
SegmentProvider &provider,
JournalTrimmer &trimmer);
JournalRef make_circularbounded(
JournalTrimmer &trimmer,
crimson::os::seastore::random_block_device::RBMDevice* device,
std::string path);
}
}
| 3,215 | 25.146341 | 71 | h |
null | ceph-main/src/crimson/os/seastore/lba_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "include/ceph_assert.h"
#include "include/buffer_fwd.h"
#include "include/interval_set.h"
#include "common/interval_map.h"
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/seastore_types.h"
namespace crimson::os::seastore {
/**
* Abstract interface for managing the logical to physical mapping
*/
class LBAManager {
public:
using base_iertr = Cache::base_iertr;
using mkfs_iertr = base_iertr;
using mkfs_ret = mkfs_iertr::future<>;
virtual mkfs_ret mkfs(
Transaction &t
) = 0;
/**
* Fetches mappings for laddr_t in range [offset, offset + len)
*
* Future will not resolve until all pins have resolved (set_paddr called)
*/
using get_mappings_iertr = base_iertr;
using get_mappings_ret = get_mappings_iertr::future<lba_pin_list_t>;
virtual get_mappings_ret get_mappings(
Transaction &t,
laddr_t offset, extent_len_t length) = 0;
/**
* Fetches mappings for a list of laddr_t in range [offset, offset + len)
*
* Future will not resolve until all pins have resolved (set_paddr called)
*/
virtual get_mappings_ret get_mappings(
Transaction &t,
    laddr_list_t &&extent_list) = 0;
/**
* Fetches the mapping for laddr_t
*
* Future will not resolve until the pin has resolved (set_paddr called)
*/
using get_mapping_iertr = base_iertr::extend<
crimson::ct_error::enoent>;
using get_mapping_ret = get_mapping_iertr::future<LBAMappingRef>;
virtual get_mapping_ret get_mapping(
Transaction &t,
laddr_t offset) = 0;
/**
* Allocates a new mapping referenced by LBARef
*
   * Offset will be relative to the block offset of the record.
   * This mapping will block transaction submission until set_paddr
* is called on the LBAMapping.
*/
using alloc_extent_iertr = base_iertr;
using alloc_extent_ret = alloc_extent_iertr::future<LBAMappingRef>;
virtual alloc_extent_ret alloc_extent(
Transaction &t,
laddr_t hint,
extent_len_t len,
paddr_t addr,
LogicalCachedExtent *nextent) = 0;
struct ref_update_result_t {
unsigned refcount = 0;
paddr_t addr;
extent_len_t length = 0;
};
using ref_iertr = base_iertr::extend<
crimson::ct_error::enoent>;
using ref_ret = ref_iertr::future<ref_update_result_t>;
/**
* Decrements ref count on extent
*
* @return returns resulting refcount
*/
virtual ref_ret decref_extent(
Transaction &t,
laddr_t addr) = 0;
/**
* Increments ref count on extent
*
* @return returns resulting refcount
*/
virtual ref_ret incref_extent(
Transaction &t,
laddr_t addr) = 0;
/**
* Should be called after replay on each cached extent.
* Implementation must initialize the LBAMapping on any
* LogicalCachedExtent's and may also read in any dependent
* structures, etc.
*
* @return returns whether the extent is alive
*/
using init_cached_extent_iertr = base_iertr;
using init_cached_extent_ret = init_cached_extent_iertr::future<bool>;
virtual init_cached_extent_ret init_cached_extent(
Transaction &t,
CachedExtentRef e) = 0;
using check_child_trackers_ret = base_iertr::future<>;
virtual check_child_trackers_ret check_child_trackers(Transaction &t) = 0;
/**
* Calls f for each mapping in [begin, end)
*/
using scan_mappings_iertr = base_iertr;
using scan_mappings_ret = scan_mappings_iertr::future<>;
using scan_mappings_func_t = std::function<
void(laddr_t, paddr_t, extent_len_t)>;
virtual scan_mappings_ret scan_mappings(
Transaction &t,
laddr_t begin,
laddr_t end,
scan_mappings_func_t &&f) = 0;
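  /*
   * Usage sketch (illustrative; `lba_manager`, `t`, `begin`, `end` and
   * `total` are assumed to outlive the returned future):
   *
   *   return lba_manager->scan_mappings(
   *     t, begin, end,
   *     [&total](laddr_t laddr, paddr_t paddr, extent_len_t len) {
   *       total += len;
   *     });
   */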
/**
* rewrite_extent
*
* rewrite extent into passed transaction
*/
using rewrite_extent_iertr = base_iertr;
using rewrite_extent_ret = rewrite_extent_iertr::future<>;
virtual rewrite_extent_ret rewrite_extent(
Transaction &t,
CachedExtentRef extent) = 0;
/**
* update_mapping
*
* update lba mapping for a delayed allocated extent
*/
using update_mapping_iertr = base_iertr;
using update_mapping_ret = base_iertr::future<>;
virtual update_mapping_ret update_mapping(
Transaction& t,
laddr_t laddr,
paddr_t prev_addr,
paddr_t paddr,
LogicalCachedExtent *nextent) = 0;
/**
* update_mappings
*
* update lba mappings for delayed allocated extents
*/
using update_mappings_iertr = update_mapping_iertr;
using update_mappings_ret = update_mapping_ret;
update_mappings_ret update_mappings(
Transaction& t,
const std::list<LogicalCachedExtentRef>& extents);
/**
* get_physical_extent_if_live
*
* Returns extent at addr/laddr if still live (if laddr
* still points at addr). Extent must be an internal, physical
* extent.
*
* Returns a null CachedExtentRef if extent is not live.
*/
using get_physical_extent_if_live_iertr = base_iertr;
using get_physical_extent_if_live_ret =
get_physical_extent_if_live_iertr::future<CachedExtentRef>;
virtual get_physical_extent_if_live_ret get_physical_extent_if_live(
Transaction &t,
extent_types_t type,
paddr_t addr,
laddr_t laddr,
extent_len_t len) = 0;
virtual ~LBAManager() {}
};
using LBAManagerRef = std::unique_ptr<LBAManager>;
class Cache;
namespace lba_manager {
LBAManagerRef create_lba_manager(Cache &cache);
}
}
| 5,641 | 26.125 | 76 | h |
null | ceph-main/src/crimson/os/seastore/logging.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <fmt/format.h>
#include "crimson/common/log.h"
#define LOGT(level_, MSG, t, ...) \
LOCAL_LOGGER.log(level_, "{} trans.{} {}: " MSG, (void*)&t, \
(t).get_trans_id(), FNAME , ##__VA_ARGS__)
#define SUBLOGT(subname_, level_, MSG, t, ...) \
LOGGER(subname_).log(level_, "{} trans.{} {}: " MSG, (void*)&t, \
(t).get_trans_id(), FNAME , ##__VA_ARGS__)
#define TRACET(...) LOGT(seastar::log_level::trace, __VA_ARGS__)
#define SUBTRACET(subname_, ...) SUBLOGT(subname_, seastar::log_level::trace, __VA_ARGS__)
#define DEBUGT(...) LOGT(seastar::log_level::debug, __VA_ARGS__)
#define SUBDEBUGT(subname_, ...) SUBLOGT(subname_, seastar::log_level::debug, __VA_ARGS__)
#define INFOT(...) LOGT(seastar::log_level::info, __VA_ARGS__)
#define SUBINFOT(subname_, ...) SUBLOGT(subname_, seastar::log_level::info, __VA_ARGS__)
#define WARNT(...) LOGT(seastar::log_level::warn, __VA_ARGS__)
#define SUBWARNT(subname_, ...) SUBLOGT(subname_, seastar::log_level::warn, __VA_ARGS__)
#define ERRORT(...) LOGT(seastar::log_level::error, __VA_ARGS__)
#define SUBERRORT(subname_, ...) SUBLOGT(subname_, seastar::log_level::error, __VA_ARGS__)
| 1,259 | 39.645161 | 90 | h |
null | ceph-main/src/crimson/os/seastore/object_data_handler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <limits>
#include "include/buffer.h"
#include "test/crimson/seastore/test_block.h" // TODO
#include "crimson/os/seastore/onode.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/transaction.h"
namespace crimson::os::seastore {
struct ObjectDataBlock : crimson::os::seastore::LogicalCachedExtent {
using Ref = TCachedExtentRef<ObjectDataBlock>;
ObjectDataBlock(ceph::bufferptr &&ptr)
: LogicalCachedExtent(std::move(ptr)) {}
ObjectDataBlock(const ObjectDataBlock &other)
: LogicalCachedExtent(other) {}
CachedExtentRef duplicate_for_write(Transaction&) final {
return CachedExtentRef(new ObjectDataBlock(*this));
};
static constexpr extent_types_t TYPE = extent_types_t::OBJECT_DATA_BLOCK;
extent_types_t get_type() const final {
return TYPE;
}
ceph::bufferlist get_delta() final {
    /* Currently, we always allocate fresh ObjectDataBlocks rather than
     * mutating existing ones. */
ceph_assert(0 == "Should be impossible");
}
void apply_delta(const ceph::bufferlist &bl) final {
// See get_delta()
ceph_assert(0 == "Should be impossible");
}
};
using ObjectDataBlockRef = TCachedExtentRef<ObjectDataBlock>;
class ObjectDataHandler {
public:
using base_iertr = TransactionManager::base_iertr;
ObjectDataHandler(uint32_t mos) : max_object_size(mos) {}
struct context_t {
TransactionManager &tm;
Transaction &t;
Onode &onode;
};
/// Writes bl to [offset, offset + bl.length())
using write_iertr = base_iertr;
using write_ret = write_iertr::future<>;
write_ret write(
context_t ctx,
objaddr_t offset,
const bufferlist &bl);
using zero_iertr = base_iertr;
using zero_ret = zero_iertr::future<>;
zero_ret zero(
context_t ctx,
objaddr_t offset,
extent_len_t len);
/// Reads data in [offset, offset + len)
using read_iertr = base_iertr;
using read_ret = read_iertr::future<bufferlist>;
read_ret read(
context_t ctx,
objaddr_t offset,
extent_len_t len);
  /// Sparse read: returns the intervals of data present in [offset, offset + len)
using fiemap_iertr = base_iertr;
using fiemap_ret = fiemap_iertr::future<std::map<uint64_t, uint64_t>>;
fiemap_ret fiemap(
context_t ctx,
objaddr_t offset,
extent_len_t len);
/// Clears data past offset
using truncate_iertr = base_iertr;
using truncate_ret = truncate_iertr::future<>;
truncate_ret truncate(
context_t ctx,
objaddr_t offset);
/// Clears data and reservation
using clear_iertr = base_iertr;
using clear_ret = clear_iertr::future<>;
clear_ret clear(context_t ctx);
private:
  /// Updates region [offset, offset + len) to the contents of bl (zeros if bl is unset)
write_ret overwrite(
context_t ctx, ///< [in] ctx
laddr_t offset, ///< [in] write offset
    extent_len_t len,               ///< [in] length to write; equals bl->length() when bl is set
std::optional<bufferlist> &&bl, ///< [in] buffer to write, empty for zeros
lba_pin_list_t &&pins ///< [in] set of pins overlapping above region
);
/// Ensures object_data reserved region is prepared
write_ret prepare_data_reservation(
context_t ctx,
object_data_t &object_data,
extent_len_t size);
/// Trims data past size
clear_ret trim_data_reservation(
context_t ctx,
object_data_t &object_data,
extent_len_t size);
private:
/**
* max_object_size
*
* For now, we allocate a fixed region of laddr space of size max_object_size
* for any object. In the future, once we have the ability to remap logical
* mappings (necessary for clone), we'll add the ability to grow and shrink
* these regions and remove this assumption.
*/
const uint32_t max_object_size = 0;
};
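/* Illustrative usage sketch (assumes tm, t and onode are supplied by the
 * caller and that a transaction is already open): writing a bufferlist at a
 * given object offset.
 *
 *   ObjectDataHandler handler(max_object_size);
 *   return handler.write(
 *     ObjectDataHandler::context_t{tm, t, onode},
 *     offset,
 *     bl);
 */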
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::ObjectDataBlock> : fmt::ostream_formatter {};
#endif
| 3,997 | 27.15493 | 102 | h |
null | ceph-main/src/crimson/os/seastore/omap_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/transaction_manager.h"
#define OMAP_INNER_BLOCK_SIZE 4096
#define OMAP_LEAF_BLOCK_SIZE 8192
namespace crimson::os::seastore {
std::ostream &operator<<(std::ostream &out, const std::list<std::string> &rhs);
std::ostream &operator<<(std::ostream &out, const std::map<std::string, std::string> &rhs);
class OMapManager {
  /* All OMapManager APIs take input string parameters by reference;
   * the caller must guarantee that the referenced strings remain alive
   * (not freed) until the returned futures resolve.
*/
public:
using base_iertr = TransactionManager::base_iertr;
/**
* allocate omap tree root node
*
* @param Transaction &t, current transaction
* @retval return the omap_root_t structure.
*/
using initialize_omap_iertr = base_iertr;
using initialize_omap_ret = initialize_omap_iertr::future<omap_root_t>;
virtual initialize_omap_ret initialize_omap(Transaction &t, laddr_t hint) = 0;
/**
* get value(string) by key(string)
*
* @param omap_root_t &omap_root, omap btree root information
* @param Transaction &t, current transaction
* @param string &key, omap string key
   * @retval the value mapped to key, or std::nullopt if the key is absent.
*/
using omap_get_value_iertr = base_iertr;
using omap_get_value_ret = omap_get_value_iertr::future<
std::optional<bufferlist>>;
virtual omap_get_value_ret omap_get_value(
const omap_root_t &omap_root,
Transaction &t,
const std::string &key) = 0;
/**
* set key value mapping in omap
*
* @param omap_root_t &omap_root, omap btree root information
* @param Transaction &t, current transaction
* @param string &key, omap string key
* @param string &value, mapped value corresponding key
*/
using omap_set_key_iertr = base_iertr;
using omap_set_key_ret = omap_set_key_iertr::future<>;
virtual omap_set_key_ret omap_set_key(
omap_root_t &omap_root,
Transaction &t,
const std::string &key,
const ceph::bufferlist &value) = 0;
using omap_set_keys_iertr = base_iertr;
using omap_set_keys_ret = omap_set_keys_iertr::future<>;
virtual omap_set_keys_ret omap_set_keys(
omap_root_t &omap_root,
Transaction &t,
std::map<std::string, ceph::bufferlist>&& keys) = 0;
/**
* remove key value mapping in omap tree
*
* @param omap_root_t &omap_root, omap btree root information
* @param Transaction &t, current transaction
* @param string &key, omap string key
*/
using omap_rm_key_iertr = base_iertr;
using omap_rm_key_ret = omap_rm_key_iertr::future<>;
virtual omap_rm_key_ret omap_rm_key(
omap_root_t &omap_root,
Transaction &t,
const std::string &key) = 0;
/**
* omap_list
*
* Scans key/value pairs in order.
*
* @param omap_root: omap btree root information
* @param t: current transaction
   * @param first: range start, nullopt sorts before any string,
   *               inclusiveness controlled by config.first_inclusive,
   *               must remain alive for the duration of the call
   * @param last: range end, nullopt sorts after any string,
   *              inclusiveness controlled by config.last_inclusive,
   *              must remain alive for the duration of the call
* @param config: see below for params
* @retval listed key->value and bool indicating complete
*/
struct omap_list_config_t {
/// max results to return
size_t max_result_size = 128;
    /// true denotes inclusive (lower_bound-like) behavior, exclusive (upper_bound-like) otherwise
/// range start behavior
bool first_inclusive = false;
/// range end behavior
bool last_inclusive = false;
omap_list_config_t(
size_t max_result_size,
bool first_inclusive,
bool last_inclusive)
: max_result_size(max_result_size),
first_inclusive(first_inclusive),
last_inclusive(last_inclusive) {}
omap_list_config_t() {}
omap_list_config_t(const omap_list_config_t &) = default;
omap_list_config_t(omap_list_config_t &&) = default;
omap_list_config_t &operator=(const omap_list_config_t &) = default;
omap_list_config_t &operator=(omap_list_config_t &&) = default;
auto with_max(size_t max) {
this->max_result_size = max;
return *this;
}
auto without_max() {
this->max_result_size = std::numeric_limits<size_t>::max();
return *this;
}
auto with_inclusive(
bool first_inclusive,
bool last_inclusive) {
this->first_inclusive = first_inclusive;
this->last_inclusive = last_inclusive;
return *this;
}
auto with_reduced_max(size_t reduced_by) const {
assert(reduced_by <= max_result_size);
return omap_list_config_t(
max_result_size - reduced_by,
first_inclusive,
last_inclusive);
}
};
using omap_list_iertr = base_iertr;
using omap_list_bare_ret = std::tuple<
bool,
std::map<std::string, bufferlist, std::less<>>>;
using omap_list_ret = omap_list_iertr::future<omap_list_bare_ret>;
virtual omap_list_ret omap_list(
const omap_root_t &omap_root,
Transaction &t,
const std::optional<std::string> &first,
const std::optional<std::string> &last,
omap_list_config_t config = omap_list_config_t()) = 0;
/**
* remove key value mappings in a key range from omap tree
*
* @param omap_root_t &omap_root, omap btree root information
* @param Transaction &t, current transaction
* @param string &first, range start
* @param string &last, range end
*/
using omap_rm_key_range_iertr = base_iertr;
using omap_rm_key_range_ret = omap_rm_key_range_iertr::future<>;
virtual omap_rm_key_range_ret omap_rm_key_range(
omap_root_t &omap_root,
Transaction &t,
const std::string &first,
const std::string &last,
omap_list_config_t config) = 0;
/**
* clear all omap tree key->value mapping
*
* @param omap_root_t &omap_root, omap btree root information
* @param Transaction &t, current transaction
*/
using omap_clear_iertr = base_iertr;
using omap_clear_ret = omap_clear_iertr::future<>;
virtual omap_clear_ret omap_clear(omap_root_t &omap_root, Transaction &t) = 0;
virtual ~OMapManager() {}
};
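/* Illustrative usage sketch (assumes omap_root, t and start are available in
 * the caller): listing at most 64 keys strictly greater than `start`.
 *
 *   omap_manager->omap_list(
 *     omap_root, t,
 *     start, std::nullopt,
 *     OMapManager::omap_list_config_t()
 *       .with_max(64)
 *       .with_inclusive(false, false)
 *   ).si_then([](auto &&listed) {
 *     auto &[complete, kvs] = listed;
 *     // consume kvs; complete == true means the end of the range was reached
 *   });
 */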
using OMapManagerRef = std::unique_ptr<OMapManager>;
namespace omap_manager {
OMapManagerRef create_omap_manager (
TransactionManager &trans_manager);
}
}
| 6,586 | 30.218009 | 91 | h |
null | ceph-main/src/crimson/os/seastore/onode.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iosfwd>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include "include/byteorder.h"
#include "seastore_types.h"
namespace crimson::os::seastore {
struct onode_layout_t {
// The expected decode size of object_info_t without oid.
static constexpr int MAX_OI_LENGTH = 232;
// We might want to move the ss field out of onode_layout_t.
  // The reason is that ss_attr may grow relatively large, as
  // its clone_overlap may grow to a large size, if applications
  // set objects to a relatively large size (for the purpose of reducing
// the number of objects per OSD, so that all objects' metadata
// can be cached in memory) and do many modifications between
// snapshots.
// TODO: implement flexible-sized onode value to store inline ss_attr
// effectively.
static constexpr int MAX_SS_LENGTH = 1;
ceph_le32 size{0};
ceph_le32 oi_size{0};
ceph_le32 ss_size{0};
omap_root_le_t omap_root;
omap_root_le_t xattr_root;
object_data_le_t object_data;
char oi[MAX_OI_LENGTH];
char ss[MAX_SS_LENGTH];
} __attribute__((packed));
class Transaction;
/**
* Onode
*
* Interface manipulated by seastore. OnodeManager implementations should
* return objects derived from this interface with layout referencing
* internal representation of onode_layout_t.
*/
class Onode : public boost::intrusive_ref_counter<
Onode,
boost::thread_unsafe_counter>
{
protected:
virtual laddr_t get_hint() const = 0;
const uint32_t default_metadata_offset = 0;
const uint32_t default_metadata_range = 0;
public:
Onode(uint32_t ddr, uint32_t dmr)
: default_metadata_offset(ddr),
default_metadata_range(dmr)
{}
virtual const onode_layout_t &get_layout() const = 0;
virtual onode_layout_t &get_mutable_layout(Transaction &t) = 0;
virtual ~Onode() = default;
laddr_t get_metadata_hint(uint64_t block_size) const {
assert(default_metadata_offset);
assert(default_metadata_range);
uint64_t range_blocks = default_metadata_range / block_size;
return get_hint() + default_metadata_offset +
(((uint32_t)std::rand() % range_blocks) * block_size);
}
laddr_t get_data_hint() const {
return get_hint();
}
};
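/* Worked example for get_metadata_hint() (the concrete numbers are
 * assumptions): with default_metadata_offset = 16 MiB,
 * default_metadata_range = 4 MiB and block_size = 4 KiB, range_blocks is
 * 1024 and the returned hint is
 *
 *   get_hint() + 16 MiB + (std::rand() % 1024) * 4 KiB
 *
 * i.e. a random block-aligned laddr inside the 4 MiB metadata window placed
 * 16 MiB past the object's base hint.
 */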
std::ostream& operator<<(std::ostream &out, const Onode &rhs);
using OnodeRef = boost::intrusive_ptr<Onode>;
}
#if FMT_VERSION >= 90000
template<> struct fmt::formatter<crimson::os::seastore::Onode> : fmt::ostream_formatter {};
#endif
| 2,578 | 27.977528 | 91 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "include/buffer_fwd.h"
#include "include/ceph_assert.h"
#include "common/hobject.h"
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/onode.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/osd/exceptions.h"
namespace crimson::os::seastore {
class OnodeManager {
using base_iertr = TransactionManager::base_iertr;
public:
using mkfs_iertr = base_iertr;
using mkfs_ret = mkfs_iertr::future<>;
virtual mkfs_ret mkfs(Transaction &t) = 0;
using contains_onode_iertr = base_iertr;
using contains_onode_ret = contains_onode_iertr::future<bool>;
virtual contains_onode_ret contains_onode(
Transaction &trans,
const ghobject_t &hoid) = 0;
using get_onode_iertr = base_iertr::extend<
crimson::ct_error::enoent>;
using get_onode_ret = get_onode_iertr::future<
OnodeRef>;
virtual get_onode_ret get_onode(
Transaction &trans,
const ghobject_t &hoid) = 0;
using get_or_create_onode_iertr = base_iertr::extend<
crimson::ct_error::value_too_large>;
using get_or_create_onode_ret = get_or_create_onode_iertr::future<
OnodeRef>;
virtual get_or_create_onode_ret get_or_create_onode(
Transaction &trans,
const ghobject_t &hoid) = 0;
using get_or_create_onodes_iertr = base_iertr::extend<
crimson::ct_error::value_too_large>;
using get_or_create_onodes_ret = get_or_create_onodes_iertr::future<
std::vector<OnodeRef>>;
virtual get_or_create_onodes_ret get_or_create_onodes(
Transaction &trans,
const std::vector<ghobject_t> &hoids) = 0;
using write_dirty_iertr = base_iertr;
using write_dirty_ret = write_dirty_iertr::future<>;
virtual write_dirty_ret write_dirty(
Transaction &trans,
const std::vector<OnodeRef> &onodes) = 0;
using erase_onode_iertr = base_iertr;
using erase_onode_ret = erase_onode_iertr::future<>;
virtual erase_onode_ret erase_onode(
Transaction &trans,
OnodeRef &onode) = 0;
using list_onodes_iertr = base_iertr;
using list_onodes_bare_ret = std::tuple<std::vector<ghobject_t>, ghobject_t>;
using list_onodes_ret = list_onodes_iertr::future<list_onodes_bare_ret>;
virtual list_onodes_ret list_onodes(
Transaction &trans,
const ghobject_t& start,
const ghobject_t& end,
uint64_t limit) = 0;
virtual ~OnodeManager() {}
};
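/* Illustrative usage sketch (assumes the caller's interruptible transaction
 * t and an hoid): fetching an onode.
 *
 *   onode_manager->get_onode(t, hoid
 *   ).si_then([](OnodeRef onode) {
 *     // use *onode; get_onode_iertr additionally carries
 *     // crimson::ct_error::enoent for missing objects
 *   });
 */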
using OnodeManagerRef = std::unique_ptr<OnodeManager>;
}
| 2,691 | 29.942529 | 79 | h |
null | ceph-main/src/crimson/os/seastore/ordering_handle.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/shared_mutex.hh>
#include "crimson/common/operation.h"
#include "crimson/osd/osd_operation.h"
namespace crimson::os::seastore {
struct WritePipeline {
struct ReserveProjectedUsage : OrderedExclusivePhaseT<ReserveProjectedUsage> {
constexpr static auto type_name = "WritePipeline::reserve_projected_usage";
} reserve_projected_usage;
struct OolWrites : UnorderedStageT<OolWrites> {
constexpr static auto type_name = "UnorderedStage::ool_writes_stage";
} ool_writes;
struct Prepare : OrderedExclusivePhaseT<Prepare> {
constexpr static auto type_name = "WritePipeline::prepare_phase";
} prepare;
struct DeviceSubmission : OrderedConcurrentPhaseT<DeviceSubmission> {
constexpr static auto type_name = "WritePipeline::device_submission_phase";
} device_submission;
struct Finalize : OrderedExclusivePhaseT<Finalize> {
constexpr static auto type_name = "WritePipeline::finalize_phase";
} finalize;
using BlockingEvents = std::tuple<
ReserveProjectedUsage::BlockingEvent,
OolWrites::BlockingEvent,
Prepare::BlockingEvent,
DeviceSubmission::BlockingEvent,
Finalize::BlockingEvent
>;
};
/**
* PlaceholderOperation
*
* Once seastore is more complete, I expect to update the externally
* facing interfaces to permit passing the osd level operation through.
* Until then (and for tests likely permanently) we'll use this unregistered
* placeholder for the pipeline phases necessary for journal correctness.
*/
class PlaceholderOperation : public crimson::osd::PhasedOperationT<PlaceholderOperation> {
public:
constexpr static auto type = 0U;
constexpr static auto type_name =
"crimson::os::seastore::PlaceholderOperation";
static PlaceholderOperation::IRef create() {
return IRef{new PlaceholderOperation()};
}
PipelineHandle handle;
WritePipeline::BlockingEvents tracking_events;
PipelineHandle& get_handle() {
return handle;
}
private:
void dump_detail(ceph::Formatter *f) const final {}
void print(std::ostream &) const final {}
};
struct OperationProxy {
OperationRef op;
OperationProxy(OperationRef op) : op(std::move(op)) {}
virtual seastar::future<> enter(WritePipeline::ReserveProjectedUsage&) = 0;
virtual seastar::future<> enter(WritePipeline::OolWrites&) = 0;
virtual seastar::future<> enter(WritePipeline::Prepare&) = 0;
virtual seastar::future<> enter(WritePipeline::DeviceSubmission&) = 0;
virtual seastar::future<> enter(WritePipeline::Finalize&) = 0;
virtual void exit() = 0;
virtual seastar::future<> complete() = 0;
virtual ~OperationProxy() = default;
};
template <typename OpT>
struct OperationProxyT : OperationProxy {
OperationProxyT(typename OpT::IRef op) : OperationProxy(op) {}
OpT* that() {
return static_cast<OpT*>(op.get());
}
const OpT* that() const {
return static_cast<const OpT*>(op.get());
}
seastar::future<> enter(WritePipeline::ReserveProjectedUsage& s) final {
return that()->enter_stage(s);
}
seastar::future<> enter(WritePipeline::OolWrites& s) final {
return that()->enter_stage(s);
}
seastar::future<> enter(WritePipeline::Prepare& s) final {
return that()->enter_stage(s);
}
seastar::future<> enter(WritePipeline::DeviceSubmission& s) final {
return that()->enter_stage(s);
}
seastar::future<> enter(WritePipeline::Finalize& s) final {
return that()->enter_stage(s);
}
void exit() final {
return that()->handle.exit();
}
seastar::future<> complete() final {
return that()->handle.complete();
}
};
struct OrderingHandle {
  // we could easily optimize this dynamic allocation out, as all concrete
  // types are supposed to have exactly the same size.
std::unique_ptr<OperationProxy> op;
seastar::shared_mutex *collection_ordering_lock = nullptr;
  // in the future we might add further constructors / templates for type
  // erasure while extracting the location of tracking events.
OrderingHandle(std::unique_ptr<OperationProxy> op) : op(std::move(op)) {}
OrderingHandle(OrderingHandle &&other)
: op(std::move(other.op)),
collection_ordering_lock(other.collection_ordering_lock) {
other.collection_ordering_lock = nullptr;
}
seastar::future<> take_collection_lock(seastar::shared_mutex &mutex) {
ceph_assert(!collection_ordering_lock);
collection_ordering_lock = &mutex;
return collection_ordering_lock->lock();
}
void maybe_release_collection_lock() {
if (collection_ordering_lock) {
collection_ordering_lock->unlock();
collection_ordering_lock = nullptr;
}
}
template <typename T>
seastar::future<> enter(T &t) {
return op->enter(t);
}
void exit() {
op->exit();
}
seastar::future<> complete() {
return op->complete();
}
~OrderingHandle() {
maybe_release_collection_lock();
}
};
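/* Illustrative usage sketch (the pipeline instance and continuation style
 * are assumptions): an OrderingHandle is advanced through the write
 * pipeline stages in order.
 *
 *   return handle.enter(pipeline.prepare
 *   ).then([&] {
 *     return handle.enter(pipeline.device_submission);
 *   }).then([&] {
 *     return handle.enter(pipeline.finalize);
 *   }).finally([&] {
 *     handle.exit();
 *   });
 */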
inline OrderingHandle get_dummy_ordering_handle() {
using PlaceholderOpProxy = OperationProxyT<PlaceholderOperation>;
return OrderingHandle{
std::make_unique<PlaceholderOpProxy>(PlaceholderOperation::create())};
}
} // namespace crimson::os::seastore
namespace crimson {
template <>
struct EventBackendRegistry<os::seastore::PlaceholderOperation> {
static std::tuple<> get_backends() {
return {};
}
};
} // namespace crimson
| 5,404 | 28.697802 | 90 | h |
null | ceph-main/src/crimson/os/seastore/random_block_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iosfwd>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "include/ceph_assert.h"
#include "crimson/os/seastore/seastore_types.h"
#include "include/buffer_fwd.h"
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/transaction.h"
#include "crimson/common/layout.h"
#include "include/buffer.h"
#include "crimson/os/seastore/device.h"
namespace crimson::os::seastore {
struct rbm_shard_info_t {
std::size_t size = 0;
uint64_t start_offset = 0;
DENC(rbm_shard_info_t, v, p) {
DENC_START(1, 1, p);
denc(v.size, p);
denc(v.start_offset, p);
DENC_FINISH(p);
}
};
struct rbm_metadata_header_t {
size_t size = 0;
size_t block_size = 0;
uint64_t feature = 0;
uint64_t journal_size = 0;
checksum_t crc = 0;
device_config_t config;
unsigned int shard_num = 0;
std::vector<rbm_shard_info_t> shard_infos;
DENC(rbm_metadata_header_t, v, p) {
DENC_START(1, 1, p);
denc(v.size, p);
denc(v.block_size, p);
denc(v.feature, p);
denc(v.journal_size, p);
denc(v.crc, p);
denc(v.config, p);
denc(v.shard_num, p);
denc(v.shard_infos, p);
DENC_FINISH(p);
}
void validate() const {
ceph_assert(shard_num == seastar::smp::count);
ceph_assert(block_size > 0);
for (unsigned int i = 0; i < seastar::smp::count; i ++) {
ceph_assert(shard_infos[i].size > block_size &&
shard_infos[i].size % block_size == 0);
ceph_assert_always(shard_infos[i].size <= DEVICE_OFF_MAX);
ceph_assert(journal_size > 0 &&
journal_size % block_size == 0);
ceph_assert(shard_infos[i].start_offset < size &&
shard_infos[i].start_offset % block_size == 0);
}
ceph_assert(config.spec.magic != 0);
ceph_assert(get_default_backend_of_device(config.spec.dtype) ==
backend_type_t::RANDOM_BLOCK);
ceph_assert(config.spec.id <= DEVICE_ID_MAX_VALID);
}
};
enum class rbm_extent_state_t {
FREE, // not allocated
RESERVED, // extent is reserved by alloc_new_extent, but is not persistent
ALLOCATED, // extent is persistent
};
class Device;
using rbm_abs_addr = uint64_t;
constexpr rbm_abs_addr RBM_START_ADDRESS = 0;
class RandomBlockManager {
public:
using read_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent,
crimson::ct_error::erange>;
virtual read_ertr::future<> read(paddr_t addr, bufferptr &buffer) = 0;
using write_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::ebadf,
crimson::ct_error::enospc,
crimson::ct_error::erange
>;
virtual write_ertr::future<> write(paddr_t addr, bufferptr &buf) = 0;
using open_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent>;
virtual open_ertr::future<> open() = 0;
using close_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg>;
virtual close_ertr::future<> close() = 0;
using allocate_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enospc
>;
using allocate_ret = allocate_ertr::future<paddr_t>;
// allocator, return start addr of allocated blocks
virtual paddr_t alloc_extent(size_t size) = 0;
virtual void mark_space_used(paddr_t paddr, size_t len) = 0;
virtual void mark_space_free(paddr_t paddr, size_t len) = 0;
virtual void complete_allocation(paddr_t addr, size_t size) = 0;
virtual size_t get_size() const = 0;
virtual extent_len_t get_block_size() const = 0;
virtual uint64_t get_free_blocks() const = 0;
virtual device_id_t get_device_id() const = 0;
virtual const seastore_meta_t &get_meta() const = 0;
virtual Device* get_device() = 0;
virtual paddr_t get_start() = 0;
virtual rbm_extent_state_t get_extent_state(paddr_t addr, size_t size) = 0;
virtual size_t get_journal_size() const = 0;
virtual ~RandomBlockManager() {}
};
using RandomBlockManagerRef = std::unique_ptr<RandomBlockManager>;
inline rbm_abs_addr convert_paddr_to_abs_addr(const paddr_t& paddr) {
const blk_paddr_t& blk_addr = paddr.as_blk_paddr();
return blk_addr.get_device_off();
}
inline paddr_t convert_abs_addr_to_paddr(rbm_abs_addr addr, device_id_t d_id) {
return paddr_t::make_blk_paddr(d_id, addr);
}
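/* Illustrative round trip (offset value chosen arbitrarily): the two helpers
 * above are inverses for a given block device id d_id.
 *
 *   rbm_abs_addr off = 4096;
 *   paddr_t p = convert_abs_addr_to_paddr(off, d_id);
 *   assert(convert_paddr_to_abs_addr(p) == off);
 */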
namespace random_block_device {
class RBMDevice;
}
seastar::future<std::unique_ptr<random_block_device::RBMDevice>>
get_rb_device(const std::string &device);
std::ostream &operator<<(std::ostream &out, const rbm_metadata_header_t &header);
std::ostream &operator<<(std::ostream &out, const rbm_shard_info_t &shard);
}
WRITE_CLASS_DENC_BOUNDED(
crimson::os::seastore::rbm_shard_info_t
)
WRITE_CLASS_DENC_BOUNDED(
crimson::os::seastore::rbm_metadata_header_t
)
#if FMT_VERSION >= 90000
template<> struct fmt::formatter<crimson::os::seastore::rbm_metadata_header_t> : fmt::ostream_formatter {};
template<> struct fmt::formatter<crimson::os::seastore::rbm_shard_info_t> : fmt::ostream_formatter {};
#endif
| 5,353 | 29.248588 | 107 | h |
null | ceph-main/src/crimson/os/seastore/randomblock_manager_group.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <set>
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/random_block_manager.h"
#include "crimson/os/seastore/random_block_manager/block_rb_manager.h"
namespace crimson::os::seastore {
class RBMDeviceGroup {
public:
RBMDeviceGroup() {
rb_devices.resize(DEVICE_ID_MAX);
}
const std::set<device_id_t>& get_device_ids() const {
return device_ids;
}
std::vector<RandomBlockManager*> get_rb_managers() const {
assert(device_ids.size());
std::vector<RandomBlockManager*> ret;
for (auto& device_id : device_ids) {
auto rb_device = rb_devices[device_id].get();
assert(rb_device->get_device_id() == device_id);
ret.emplace_back(rb_device);
}
return ret;
}
void add_rb_manager(RandomBlockManagerRef rbm) {
auto device_id = rbm->get_device_id();
ceph_assert(!has_device(device_id));
rb_devices[device_id] = std::move(rbm);
device_ids.insert(device_id);
}
void reset() {
rb_devices.clear();
rb_devices.resize(DEVICE_ID_MAX);
device_ids.clear();
}
auto get_block_size() const {
assert(device_ids.size());
return rb_devices[*device_ids.begin()]->get_block_size();
}
const seastore_meta_t &get_meta() const {
assert(device_ids.size());
return rb_devices[*device_ids.begin()]->get_meta();
}
private:
bool has_device(device_id_t id) const {
assert(id <= DEVICE_ID_MAX_VALID);
return device_ids.count(id) >= 1;
}
std::vector<RandomBlockManagerRef> rb_devices;
std::set<device_id_t> device_ids;
};
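/* Illustrative usage sketch (rbm is an assumed RandomBlockManagerRef owned
 * by the caller):
 *
 *   RBMDeviceGroup group;
 *   group.add_rb_manager(std::move(rbm));
 *   for (auto *rb : group.get_rb_managers()) {
 *     // e.g. rb->get_block_size(), rb->alloc_extent(len), ...
 *   }
 */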
using RBMDeviceGroupRef = std::unique_ptr<RBMDeviceGroup>;
}
| 1,787 | 23.833333 | 72 | h |
null | ceph-main/src/crimson/os/seastore/root_block.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/cached_extent.h"
namespace crimson::os::seastore {
/**
* RootBlock
*
* Holds the physical addresses of all metadata roots.
* In-memory values may be
* - absolute: reference to block which predates the current transaction
* - record_relative: reference to block updated in this transaction
* if !pending()
*
* Journal replay only considers deltas and must always discover the most
* recent value for the RootBlock. Because the contents of root_t above are
* very small, it's simplest to stash the entire root_t value into the delta
* and never actually write the RootBlock to a physical location (safe since
* nothing references the location of the RootBlock).
*
* As a result, Cache treats the root differently in a few ways including:
* - state will only ever be DIRTY or MUTATION_PENDING
* - RootBlock's never show up in the transaction fresh or dirty lists --
* there's a special Transaction::root member for when the root needs to
* be mutated.
*
* TODO: Journal trimming will need to be aware of the most recent RootBlock
* delta location, or, even easier, just always write one out with the
* mutation which changes the journal trim bound.
*/
struct RootBlock : CachedExtent {
constexpr static extent_len_t SIZE = 4<<10;
using Ref = TCachedExtentRef<RootBlock>;
root_t root;
CachedExtent* lba_root_node = nullptr;
CachedExtent* backref_root_node = nullptr;
RootBlock() : CachedExtent(zero_length_t()) {};
RootBlock(const RootBlock &rhs)
: CachedExtent(rhs),
root(rhs.root),
lba_root_node(nullptr),
backref_root_node(nullptr)
{}
CachedExtentRef duplicate_for_write(Transaction&) final {
return CachedExtentRef(new RootBlock(*this));
};
static constexpr extent_types_t TYPE = extent_types_t::ROOT;
extent_types_t get_type() const final {
return extent_types_t::ROOT;
}
void on_replace_prior(Transaction &t) final;
/// dumps root as delta
ceph::bufferlist get_delta() final {
ceph::bufferlist bl;
ceph::buffer::ptr bptr(sizeof(root_t));
*reinterpret_cast<root_t*>(bptr.c_str()) = root;
bl.append(bptr);
return bl;
}
/// overwrites root
void apply_delta_and_adjust_crc(paddr_t base, const ceph::bufferlist &_bl) final {
assert(_bl.length() == sizeof(root_t));
ceph::bufferlist bl = _bl;
bl.rebuild();
root = *reinterpret_cast<const root_t*>(bl.front().c_str());
root.adjust_addrs_from_base(base);
}
/// Patches relative addrs in memory based on record commit addr
void on_delta_write(paddr_t record_block_offset) final {
root.adjust_addrs_from_base(record_block_offset);
}
complete_load_ertr::future<> complete_load() final {
ceph_abort_msg("Root is only written via deltas");
}
void on_initial_write() final {
ceph_abort_msg("Root is only written via deltas");
}
root_t &get_root() { return root; }
std::ostream &print_detail(std::ostream &out) const final {
return out << ", root_block(lba_root_node=" << (void*)lba_root_node
<< ", backref_root_node=" << (void*)backref_root_node
<< ")";
}
};
using RootBlockRef = RootBlock::Ref;
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::RootBlock> : fmt::ostream_formatter {};
#endif
| 3,419 | 30.090909 | 96 | h |
null | ceph-main/src/crimson/os/seastore/seastore.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string>
#include <unordered_map>
#include <map>
#include <typeinfo>
#include <vector>
#include <optional>
#include <seastar/core/future.hh>
#include <seastar/core/metrics_types.hh>
#include "include/uuid.h"
#include "os/Transaction.h"
#include "crimson/common/throttle.h"
#include "crimson/os/futurized_collection.h"
#include "crimson/os/futurized_store.h"
#include "crimson/os/seastore/device.h"
#include "crimson/os/seastore/transaction.h"
#include "crimson/os/seastore/onode_manager.h"
#include "crimson/os/seastore/omap_manager.h"
#include "crimson/os/seastore/collection_manager.h"
#include "crimson/os/seastore/object_data_handler.h"
namespace crimson::os::seastore {
class Onode;
using OnodeRef = boost::intrusive_ptr<Onode>;
class TransactionManager;
enum class op_type_t : uint8_t {
TRANSACTION = 0,
READ,
WRITE,
GET_ATTR,
GET_ATTRS,
STAT,
OMAP_GET_VALUES,
OMAP_LIST,
MAX
};
class SeastoreCollection final : public FuturizedCollection {
public:
template <typename... T>
SeastoreCollection(T&&... args) :
FuturizedCollection(std::forward<T>(args)...) {}
seastar::shared_mutex ordering_lock;
};
/**
* col_obj_ranges_t
*
* Represents the two ghobject_t ranges spanned by a PG collection.
* Temp objects will be within [temp_begin, temp_end) and normal objects
* will be in [obj_begin, obj_end).
*/
struct col_obj_ranges_t {
ghobject_t temp_begin;
ghobject_t temp_end;
ghobject_t obj_begin;
ghobject_t obj_end;
};
class SeaStore final : public FuturizedStore {
public:
class MDStore {
public:
using base_iertr = crimson::errorator<
crimson::ct_error::input_output_error
>;
using write_meta_ertr = base_iertr;
using write_meta_ret = write_meta_ertr::future<>;
virtual write_meta_ret write_meta(
const std::string &key,
const std::string &val
) = 0;
using read_meta_ertr = base_iertr;
using read_meta_ret = write_meta_ertr::future<std::optional<std::string>>;
virtual read_meta_ret read_meta(const std::string &key) = 0;
virtual ~MDStore() {}
};
using MDStoreRef = std::unique_ptr<MDStore>;
class Shard : public FuturizedStore::Shard {
public:
Shard(
std::string root,
Device* device,
bool is_test);
~Shard() = default;
seastar::future<struct stat> stat(
CollectionRef c,
const ghobject_t& oid) final;
read_errorator::future<ceph::bufferlist> read(
CollectionRef c,
const ghobject_t& oid,
uint64_t offset,
size_t len,
uint32_t op_flags = 0) final;
read_errorator::future<ceph::bufferlist> readv(
CollectionRef c,
const ghobject_t& oid,
interval_set<uint64_t>& m,
uint32_t op_flags = 0) final;
get_attr_errorator::future<ceph::bufferlist> get_attr(
CollectionRef c,
const ghobject_t& oid,
std::string_view name) const final;
get_attrs_ertr::future<attrs_t> get_attrs(
CollectionRef c,
const ghobject_t& oid) final;
read_errorator::future<omap_values_t> omap_get_values(
CollectionRef c,
const ghobject_t& oid,
const omap_keys_t& keys) final;
/// Retrieves paged set of values > start (if present)
using omap_get_values_ret_bare_t = std::tuple<bool, omap_values_t>;
using omap_get_values_ret_t = read_errorator::future<
omap_get_values_ret_bare_t>;
omap_get_values_ret_t omap_get_values(
CollectionRef c, ///< [in] collection
const ghobject_t &oid, ///< [in] oid
const std::optional<std::string> &start ///< [in] start, empty for begin
) final; ///< @return <done, values> values.empty() iff done
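    /* Illustrative paging sketch (continuation style elided): callers loop,
     * passing the last key returned as the next `start`, until done is true.
     *
     *   std::optional<std::string> cursor;   // nullopt == begin
     *   // repeat:
     *   //   auto [done, values] = ... omap_get_values(c, oid, cursor) ...;
     *   //   consume(values);
     *   //   if (!done) cursor = values.rbegin()->first;
     */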
get_attr_errorator::future<bufferlist> omap_get_header(
CollectionRef c,
const ghobject_t& oid) final;
seastar::future<std::tuple<std::vector<ghobject_t>, ghobject_t>> list_objects(
CollectionRef c,
const ghobject_t& start,
const ghobject_t& end,
uint64_t limit) const final;
seastar::future<CollectionRef> create_new_collection(const coll_t& cid) final;
seastar::future<CollectionRef> open_collection(const coll_t& cid) final;
seastar::future<> do_transaction_no_callbacks(
CollectionRef ch,
ceph::os::Transaction&& txn) final;
/* Note, flush() machinery must go through the same pipeline
* stages and locks as do_transaction. */
seastar::future<> flush(CollectionRef ch) final;
read_errorator::future<std::map<uint64_t, uint64_t>> fiemap(
CollectionRef ch,
const ghobject_t& oid,
uint64_t off,
uint64_t len) final;
unsigned get_max_attr_name_length() const final {
return 256;
}
// only exposed to SeaStore
public:
seastar::future<> umount();
// init managers and mount transaction_manager
seastar::future<> mount_managers();
void set_secondaries(Device& sec_dev) {
secondaries.emplace_back(&sec_dev);
}
using coll_core_t = FuturizedStore::coll_core_t;
seastar::future<std::vector<coll_core_t>> list_collections();
seastar::future<> write_meta(const std::string& key,
const std::string& value);
store_statfs_t stat() const;
uuid_d get_fsid() const;
seastar::future<> mkfs_managers();
void init_managers();
private:
struct internal_context_t {
CollectionRef ch;
ceph::os::Transaction ext_transaction;
internal_context_t(
CollectionRef ch,
ceph::os::Transaction &&_ext_transaction,
TransactionRef &&transaction)
: ch(ch), ext_transaction(std::move(_ext_transaction)),
transaction(std::move(transaction)),
iter(ext_transaction.begin()) {}
TransactionRef transaction;
ceph::os::Transaction::iterator iter;
std::chrono::steady_clock::time_point begin_timestamp = std::chrono::steady_clock::now();
void reset_preserve_handle(TransactionManager &tm) {
tm.reset_transaction_preserve_handle(*transaction);
iter = ext_transaction.begin();
}
};
TransactionManager::read_extent_iertr::future<std::optional<unsigned>>
get_coll_bits(CollectionRef ch, Transaction &t) const;
static void on_error(ceph::os::Transaction &t);
template <typename F>
auto repeat_with_internal_context(
CollectionRef ch,
ceph::os::Transaction &&t,
Transaction::src_t src,
const char* tname,
op_type_t op_type,
F &&f) {
return seastar::do_with(
internal_context_t(
ch, std::move(t),
transaction_manager->create_transaction(src, tname)),
std::forward<F>(f),
[this, op_type](auto &ctx, auto &f) {
return ctx.transaction->get_handle().take_collection_lock(
static_cast<SeastoreCollection&>(*(ctx.ch)).ordering_lock
).then([this] {
return throttler.get(1);
}).then([&, this] {
return repeat_eagain([&, this] {
ctx.reset_preserve_handle(*transaction_manager);
return std::invoke(f, ctx);
}).handle_error(
crimson::ct_error::eagain::pass_further{},
crimson::ct_error::all_same_way([&ctx](auto e) {
on_error(ctx.ext_transaction);
})
);
}).then([this, op_type, &ctx] {
add_latency_sample(op_type,
std::chrono::steady_clock::now() - ctx.begin_timestamp);
}).finally([this] {
throttler.put();
});
});
}
template <typename Ret, typename F>
auto repeat_with_onode(
CollectionRef ch,
const ghobject_t &oid,
Transaction::src_t src,
const char* tname,
op_type_t op_type,
F &&f) const {
auto begin_time = std::chrono::steady_clock::now();
return seastar::do_with(
oid, Ret{}, std::forward<F>(f),
[this, src, op_type, begin_time, tname
](auto &oid, auto &ret, auto &f)
{
return repeat_eagain([&, this, src, tname] {
return transaction_manager->with_transaction_intr(
src,
tname,
[&, this](auto& t)
{
return onode_manager->get_onode(t, oid
).si_then([&](auto onode) {
return seastar::do_with(std::move(onode), [&](auto& onode) {
return f(t, *onode);
});
}).si_then([&ret](auto _ret) {
ret = _ret;
});
});
}).safe_then([&ret, op_type, begin_time, this] {
const_cast<Shard*>(this)->add_latency_sample(op_type,
std::chrono::steady_clock::now() - begin_time);
return seastar::make_ready_future<Ret>(ret);
});
});
}
using _fiemap_ret = ObjectDataHandler::fiemap_ret;
_fiemap_ret _fiemap(
Transaction &t,
Onode &onode,
uint64_t off,
uint64_t len) const;
using _omap_get_value_iertr = OMapManager::base_iertr::extend<
crimson::ct_error::enodata
>;
using _omap_get_value_ret = _omap_get_value_iertr::future<ceph::bufferlist>;
_omap_get_value_ret _omap_get_value(
Transaction &t,
omap_root_t &&root,
std::string_view key) const;
using _omap_get_values_iertr = OMapManager::base_iertr;
using _omap_get_values_ret = _omap_get_values_iertr::future<omap_values_t>;
_omap_get_values_ret _omap_get_values(
Transaction &t,
omap_root_t &&root,
const omap_keys_t &keys) const;
friend class SeaStoreOmapIterator;
using omap_list_bare_ret = OMapManager::omap_list_bare_ret;
using omap_list_ret = OMapManager::omap_list_ret;
omap_list_ret omap_list(
Onode &onode,
const omap_root_le_t& omap_root,
Transaction& t,
const std::optional<std::string>& start,
OMapManager::omap_list_config_t config) const;
using tm_iertr = TransactionManager::base_iertr;
using tm_ret = tm_iertr::future<>;
tm_ret _do_transaction_step(
internal_context_t &ctx,
CollectionRef &col,
std::vector<OnodeRef> &onodes,
std::vector<OnodeRef> &d_onodes,
ceph::os::Transaction::iterator &i);
tm_ret _remove(
internal_context_t &ctx,
OnodeRef &onode);
tm_ret _touch(
internal_context_t &ctx,
OnodeRef &onode);
tm_ret _write(
internal_context_t &ctx,
OnodeRef &onode,
uint64_t offset, size_t len,
ceph::bufferlist &&bl,
uint32_t fadvise_flags);
tm_ret _zero(
internal_context_t &ctx,
OnodeRef &onode,
objaddr_t offset, extent_len_t len);
tm_ret _omap_set_values(
internal_context_t &ctx,
OnodeRef &onode,
std::map<std::string, ceph::bufferlist> &&aset);
tm_ret _omap_set_header(
internal_context_t &ctx,
OnodeRef &onode,
ceph::bufferlist &&header);
tm_ret _omap_clear(
internal_context_t &ctx,
OnodeRef &onode);
tm_ret _omap_rmkeys(
internal_context_t &ctx,
OnodeRef &onode,
omap_keys_t &&aset);
tm_ret _omap_rmkeyrange(
internal_context_t &ctx,
OnodeRef &onode,
std::string first,
std::string last);
tm_ret _truncate(
internal_context_t &ctx,
OnodeRef &onode, uint64_t size);
tm_ret _setattrs(
internal_context_t &ctx,
OnodeRef &onode,
std::map<std::string,bufferlist>&& aset);
tm_ret _rmattr(
internal_context_t &ctx,
OnodeRef &onode,
std::string name);
tm_ret _rmattrs(
internal_context_t &ctx,
OnodeRef &onode);
tm_ret _xattr_rmattr(
internal_context_t &ctx,
OnodeRef &onode,
std::string &&name);
tm_ret _xattr_clear(
internal_context_t &ctx,
OnodeRef &onode);
tm_ret _create_collection(
internal_context_t &ctx,
const coll_t& cid, int bits);
tm_ret _remove_collection(
internal_context_t &ctx,
const coll_t& cid);
using omap_set_kvs_ret = tm_iertr::future<>;
omap_set_kvs_ret _omap_set_kvs(
OnodeRef &onode,
const omap_root_le_t& omap_root,
Transaction& t,
omap_root_le_t& mutable_omap_root,
std::map<std::string, ceph::bufferlist>&& kvs);
boost::intrusive_ptr<SeastoreCollection> _get_collection(const coll_t& cid);
static constexpr auto LAT_MAX = static_cast<std::size_t>(op_type_t::MAX);
struct {
std::array<seastar::metrics::histogram, LAT_MAX> op_lat;
} stats;
seastar::metrics::histogram& get_latency(
op_type_t op_type) {
assert(static_cast<std::size_t>(op_type) < stats.op_lat.size());
return stats.op_lat[static_cast<std::size_t>(op_type)];
}
void add_latency_sample(op_type_t op_type,
std::chrono::steady_clock::duration dur) {
seastar::metrics::histogram& lat = get_latency(op_type);
lat.sample_count++;
lat.sample_sum += std::chrono::duration_cast<std::chrono::milliseconds>(dur).count();
}
private:
std::string root;
Device* device;
const uint32_t max_object_size;
bool is_test;
std::vector<Device*> secondaries;
TransactionManagerRef transaction_manager;
CollectionManagerRef collection_manager;
OnodeManagerRef onode_manager;
common::Throttle throttler;
seastar::metrics::metric_group metrics;
void register_metrics();
};
public:
SeaStore(
const std::string& root,
MDStoreRef mdstore);
~SeaStore();
seastar::future<> start() final;
seastar::future<> stop() final;
mount_ertr::future<> mount() final;
seastar::future<> umount() final;
mkfs_ertr::future<> mkfs(uuid_d new_osd_fsid) final;
seastar::future<store_statfs_t> stat() const final;
uuid_d get_fsid() const final {
ceph_assert(seastar::this_shard_id() == primary_core);
return shard_stores.local().get_fsid();
}
seastar::future<> write_meta(
const std::string& key,
const std::string& value) final {
ceph_assert(seastar::this_shard_id() == primary_core);
return shard_stores.local().write_meta(
key, value).then([this, key, value] {
return mdstore->write_meta(key, value);
}).handle_error(
crimson::ct_error::assert_all{"Invalid error in SeaStore::write_meta"}
);
}
seastar::future<std::tuple<int, std::string>> read_meta(const std::string& key) final;
seastar::future<std::vector<coll_core_t>> list_collections() final;
FuturizedStore::Shard& get_sharded_store() final {
return shard_stores.local();
}
static col_obj_ranges_t
get_objs_range(CollectionRef ch, unsigned bits);
// for test
public:
mount_ertr::future<> test_mount();
mkfs_ertr::future<> test_mkfs(uuid_d new_osd_fsid);
DeviceRef get_primary_device_ref() {
return std::move(device);
}
seastar::future<> test_start(DeviceRef dev);
private:
seastar::future<> write_fsid(uuid_d new_osd_fsid);
seastar::future<> prepare_meta(uuid_d new_osd_fsid);
seastar::future<> set_secondaries();
private:
std::string root;
MDStoreRef mdstore;
DeviceRef device;
std::vector<DeviceRef> secondaries;
seastar::sharded<SeaStore::Shard> shard_stores;
};
std::unique_ptr<SeaStore> make_seastore(
const std::string &device);
std::unique_ptr<SeaStore> make_test_seastore(
SeaStore::MDStoreRef mdstore);
}
| 15,257 | 27.897727 | 95 | h |
null | ceph-main/src/crimson/os/seastore/segment_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iosfwd>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <boost/iterator/counting_iterator.hpp>
#include <seastar/core/future.hh>
#include "include/buffer_fwd.h"
#include "include/ceph_assert.h"
#include "crimson/common/config_proxy.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/osd/exceptions.h"
#include "device.h"
namespace crimson::os::seastore {
using std::vector;
struct block_shard_info_t {
std::size_t size;
std::size_t segments;
uint64_t tracker_offset;
uint64_t first_segment_offset;
DENC(block_shard_info_t, v, p) {
DENC_START(1, 1, p);
denc(v.size, p);
denc(v.segments, p);
denc(v.tracker_offset, p);
denc(v.first_segment_offset, p);
DENC_FINISH(p);
}
};
struct block_sm_superblock_t {
unsigned int shard_num = 0;
size_t segment_size = 0;
size_t block_size = 0;
std::vector<block_shard_info_t> shard_infos;
device_config_t config;
DENC(block_sm_superblock_t, v, p) {
DENC_START(1, 1, p);
denc(v.shard_num, p);
denc(v.segment_size, p);
denc(v.block_size, p);
denc(v.shard_infos, p);
denc(v.config, p);
DENC_FINISH(p);
}
void validate() const {
ceph_assert(shard_num == seastar::smp::count);
ceph_assert(block_size > 0);
ceph_assert(segment_size > 0 &&
segment_size % block_size == 0);
ceph_assert_always(segment_size <= SEGMENT_OFF_MAX);
for (unsigned int i = 0; i < seastar::smp::count; i ++) {
ceph_assert(shard_infos[i].size > segment_size &&
shard_infos[i].size % block_size == 0);
ceph_assert_always(shard_infos[i].size <= DEVICE_OFF_MAX);
ceph_assert(shard_infos[i].segments > 0);
ceph_assert_always(shard_infos[i].segments <= DEVICE_SEGMENT_ID_MAX);
ceph_assert(shard_infos[i].tracker_offset > 0 &&
shard_infos[i].tracker_offset % block_size == 0);
ceph_assert(shard_infos[i].first_segment_offset > shard_infos[i].tracker_offset &&
shard_infos[i].first_segment_offset % block_size == 0);
}
ceph_assert(config.spec.magic != 0);
ceph_assert(get_default_backend_of_device(config.spec.dtype) ==
backend_type_t::SEGMENTED);
ceph_assert(config.spec.id <= DEVICE_ID_MAX_VALID);
if (!config.major_dev) {
ceph_assert(config.secondary_devices.size() == 0);
}
for (const auto& [k, v] : config.secondary_devices) {
ceph_assert(k != config.spec.id);
ceph_assert(k <= DEVICE_ID_MAX_VALID);
ceph_assert(k == v.id);
ceph_assert(v.magic != 0);
ceph_assert(v.dtype > device_type_t::NONE);
ceph_assert(v.dtype < device_type_t::NUM_TYPES);
}
}
};
std::ostream& operator<<(std::ostream&, const block_shard_info_t&);
std::ostream& operator<<(std::ostream&, const block_sm_superblock_t&);
class Segment : public boost::intrusive_ref_counter<
Segment,
boost::thread_unsafe_counter>{
public:
enum class segment_state_t : uint8_t {
EMPTY = 0,
OPEN = 1,
CLOSED = 2
};
/**
* get_segment_id
*/
virtual segment_id_t get_segment_id() const = 0;
/**
* min next write location
*/
virtual segment_off_t get_write_ptr() const = 0;
/**
* max capacity
*/
virtual segment_off_t get_write_capacity() const = 0;
/**
* close
*
* Closes segment for writes. Won't complete until
* outstanding writes to this segment are complete.
*/
using close_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent>;
virtual close_ertr::future<> close() = 0;
/**
* write
*
* @param offset offset of write, must be aligned to <> and >= write pointer, advances
* write pointer
* @param bl buffer to write, will be padded if not aligned
*/
using write_ertr = crimson::errorator<
crimson::ct_error::input_output_error, // media error or corruption
crimson::ct_error::invarg, // if offset is < write pointer or misaligned
crimson::ct_error::ebadf, // segment closed
crimson::ct_error::enospc // write exceeds segment size
>;
virtual write_ertr::future<> write(
segment_off_t offset, ceph::bufferlist bl) = 0;
/**
* advance_wp
*
* advance the segment write pointer,
* needed when writing at wp is strictly implemented. ex: ZBD backed segments
* @param offset: advance write pointer till the given offset
*/
virtual write_ertr::future<> advance_wp(
segment_off_t offset) = 0;
virtual ~Segment() {}
};
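/* Illustrative usage sketch (seg is an assumed SegmentRef obtained from
 * SegmentManager::open(), bl a record-sized bufferlist): appending at the
 * current write pointer; once the segment fills up, close() it.
 *
 *   auto off = seg->get_write_ptr();
 *   return seg->write(off, std::move(bl));   // advances the write pointer
 */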
using SegmentRef = boost::intrusive_ptr<Segment>;
std::ostream& operator<<(std::ostream& out, Segment::segment_state_t);
constexpr size_t PADDR_SIZE = sizeof(paddr_t);
class SegmentManager;
using SegmentManagerRef = std::unique_ptr<SegmentManager>;
class SegmentManager : public Device {
public:
backend_type_t get_backend_type() const final {
return backend_type_t::SEGMENTED;
}
using open_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent>;
virtual open_ertr::future<SegmentRef> open(segment_id_t id) = 0;
using release_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent>;
virtual release_ertr::future<> release(segment_id_t id) = 0;
  /* Methods for discovering device geometry, segment id set, etc. */
virtual segment_off_t get_segment_size() const = 0;
virtual device_segment_id_t get_num_segments() const {
ceph_assert(get_available_size() % get_segment_size() == 0);
return ((device_segment_id_t)(get_available_size() / get_segment_size()));
}
virtual ~SegmentManager() {}
static seastar::future<SegmentManagerRef>
get_segment_manager(const std::string &device, device_type_t dtype);
};
}
WRITE_CLASS_DENC(
crimson::os::seastore::block_shard_info_t
)
WRITE_CLASS_DENC(
crimson::os::seastore::block_sm_superblock_t
)
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::block_shard_info_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::block_sm_superblock_t> : fmt::ostream_formatter {};
#endif
| 6,414 | 28.562212 | 108 | h |
null | ceph-main/src/crimson/os/seastore/segment_manager_group.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <set>
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/segment_manager.h"
namespace crimson::os::seastore {
class SegmentManagerGroup {
public:
SegmentManagerGroup() {
segment_managers.resize(DEVICE_ID_MAX, nullptr);
}
const std::set<device_id_t>& get_device_ids() const {
return device_ids;
}
std::vector<SegmentManager*> get_segment_managers() const {
assert(device_ids.size());
std::vector<SegmentManager*> ret;
for (auto& device_id : device_ids) {
auto segment_manager = segment_managers[device_id];
assert(segment_manager->get_device_id() == device_id);
ret.emplace_back(segment_manager);
}
return ret;
}
void add_segment_manager(SegmentManager* segment_manager) {
auto device_id = segment_manager->get_device_id();
ceph_assert(!has_device(device_id));
if (!device_ids.empty()) {
auto existing_id = *device_ids.begin();
ceph_assert(segment_managers[existing_id]->get_device_type()
== segment_manager->get_device_type());
}
segment_managers[device_id] = segment_manager;
device_ids.insert(device_id);
}
void reset() {
segment_managers.clear();
segment_managers.resize(DEVICE_ID_MAX, nullptr);
device_ids.clear();
}
/**
* get device info
*
   * Assumes all segment managers share the following information.
*/
extent_len_t get_block_size() const {
assert(device_ids.size());
return segment_managers[*device_ids.begin()]->get_block_size();
}
segment_off_t get_segment_size() const {
assert(device_ids.size());
return segment_managers[*device_ids.begin()]->get_segment_size();
}
const seastore_meta_t &get_meta() const {
assert(device_ids.size());
return segment_managers[*device_ids.begin()]->get_meta();
}
std::size_t get_rounded_header_length() const {
return p2roundup(
ceph::encoded_sizeof_bounded<segment_header_t>(),
(std::size_t)get_block_size());
}
std::size_t get_rounded_tail_length() const {
return p2roundup(
ceph::encoded_sizeof_bounded<segment_tail_t>(),
(std::size_t)get_block_size());
}
using read_segment_header_ertr = crimson::errorator<
crimson::ct_error::enoent,
crimson::ct_error::enodata,
crimson::ct_error::input_output_error
>;
using read_segment_header_ret = read_segment_header_ertr::future<
segment_header_t>;
read_segment_header_ret read_segment_header(segment_id_t segment);
using read_segment_tail_ertr = read_segment_header_ertr;
using read_segment_tail_ret = read_segment_tail_ertr::future<
segment_tail_t>;
read_segment_tail_ret read_segment_tail(segment_id_t segment);
using read_ertr = SegmentManager::read_ertr;
using scan_valid_records_ertr = read_ertr;
using scan_valid_records_ret = scan_valid_records_ertr::future<
size_t>;
using found_record_handler_t = std::function<
scan_valid_records_ertr::future<>(
record_locator_t record_locator,
// callee may assume header and bl will remain valid until
// returned future resolves
const record_group_header_t &header,
const bufferlist &mdbuf)>;
scan_valid_records_ret scan_valid_records(
scan_valid_records_cursor &cursor, ///< [in, out] cursor, updated during call
segment_nonce_t nonce, ///< [in] nonce for segment
size_t budget, ///< [in] max budget to use
found_record_handler_t &handler ///< [in] handler for records
); ///< @return used budget
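  /* Illustrative usage sketch (cursor, nonce and budget are assumed to have
   * been initialized from the segment header by the caller):
   *
   *   SegmentManagerGroup::found_record_handler_t handler(
   *     [](record_locator_t locator,
   *        const record_group_header_t &header,
   *        const bufferlist &mdbuf)
   *     -> SegmentManagerGroup::scan_valid_records_ertr::future<> {
   *       // decode/replay the deltas described by header and mdbuf
   *       return seastar::now();
   *     });
   *   return sm_group.scan_valid_records(cursor, nonce, budget, handler);
   */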
/*
* read journal segment headers
*/
using find_journal_segment_headers_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
using find_journal_segment_headers_ret_bare = std::vector<
std::pair<segment_id_t, segment_header_t>>;
using find_journal_segment_headers_ret = find_journal_segment_headers_ertr::future<
find_journal_segment_headers_ret_bare>;
find_journal_segment_headers_ret find_journal_segment_headers();
using open_ertr = SegmentManager::open_ertr;
open_ertr::future<SegmentRef> open(segment_id_t id) {
assert(has_device(id.device_id()));
return segment_managers[id.device_id()]->open(id);
}
using release_ertr = SegmentManager::release_ertr;
release_ertr::future<> release_segment(segment_id_t id) {
assert(has_device(id.device_id()));
return segment_managers[id.device_id()]->release(id);
}
private:
bool has_device(device_id_t id) const {
assert(id <= DEVICE_ID_MAX_VALID);
return device_ids.count(id) >= 1;
}
/// read record metadata for record starting at start
using read_validate_record_metadata_ertr = read_ertr;
using read_validate_record_metadata_ret =
read_validate_record_metadata_ertr::future<
std::optional<std::pair<record_group_header_t, bufferlist>>
>;
read_validate_record_metadata_ret read_validate_record_metadata(
paddr_t start,
segment_nonce_t nonce);
/// read and validate data
using read_validate_data_ertr = read_ertr;
using read_validate_data_ret = read_validate_data_ertr::future<bool>;
read_validate_data_ret read_validate_data(
paddr_t record_base,
const record_group_header_t &header ///< caller must ensure lifetime through
/// future resolution
);
using consume_record_group_ertr = scan_valid_records_ertr;
consume_record_group_ertr::future<> consume_next_records(
scan_valid_records_cursor& cursor,
found_record_handler_t& handler,
std::size_t& budget_used);
std::vector<SegmentManager*> segment_managers;
std::set<device_id_t> device_ids;
};
using SegmentManagerGroupRef = std::unique_ptr<SegmentManagerGroup>;
} // namespace crimson::os::seastore
| 5,917 | 32.247191 | 85 | h |
null | ceph-main/src/crimson/os/seastore/segment_seq_allocator.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/seastore_types.h"
namespace crimson::os::seastore {
class AsyncCleaner;
}
namespace crimson::os::seastore::journal {
class SegmentedJournal;
}
namespace crimson::os::seastore {
class SegmentSeqAllocator {
public:
SegmentSeqAllocator(segment_type_t type)
: type(type) {}
segment_seq_t get_and_inc_next_segment_seq() {
return next_segment_seq++;
}
private:
void set_next_segment_seq(segment_seq_t seq) {
LOG_PREFIX(SegmentSeqAllocator::set_next_segment_seq);
SUBDEBUG(
seastore_journal,
"{}, next={}, cur={}",
type,
segment_seq_printer_t{seq},
segment_seq_printer_t{next_segment_seq});
assert(type == segment_type_t::JOURNAL
? seq >= next_segment_seq
: true);
if (seq > next_segment_seq)
next_segment_seq = seq;
}
segment_seq_t next_segment_seq = 0;
segment_type_t type = segment_type_t::NULL_SEG;
friend class journal::SegmentedJournal;
friend class SegmentCleaner;
};
using SegmentSeqAllocatorRef =
std::unique_ptr<SegmentSeqAllocator>;
};
| 1,222 | 22.980392 | 70 | h |
null | ceph-main/src/crimson/os/seastore/transaction.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <boost/intrusive/list.hpp>
#include "crimson/common/log.h"
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/ordering_handle.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/root_block.h"
namespace crimson::os::seastore {
class SeaStore;
class Transaction;
struct io_stat_t {
uint64_t num = 0;
uint64_t bytes = 0;
bool is_clear() const {
return (num == 0 && bytes == 0);
}
void increment(uint64_t _bytes) {
++num;
bytes += _bytes;
}
void increment_stat(const io_stat_t& stat) {
num += stat.num;
bytes += stat.bytes;
}
};
inline std::ostream& operator<<(std::ostream& out, const io_stat_t& stat) {
return out << stat.num << "(" << stat.bytes << "B)";
}
struct version_stat_t {
uint64_t num = 0;
uint64_t version = 0;
bool is_clear() const {
return (num == 0 && version == 0);
}
void increment(extent_version_t v) {
++num;
version += v;
}
void increment_stat(const version_stat_t& stat) {
num += stat.num;
version += stat.version;
}
};
/**
* Transaction
*
* Representation of in-progress mutation. Used exclusively through Cache methods.
*
* Transaction log levels:
* seastore_t
* - DEBUG: transaction create, conflict, commit events
* - TRACE: DEBUG details
* - seastore_cache logs
*/
class Transaction {
public:
using Ref = std::unique_ptr<Transaction>;
using on_destruct_func_t = std::function<void(Transaction&)>;
enum class get_extent_ret {
PRESENT,
ABSENT,
RETIRED
};
get_extent_ret get_extent(paddr_t addr, CachedExtentRef *out) {
LOG_PREFIX(Transaction::get_extent);
// it's possible that both write_set and retired_set contain
// this addr at the same time when addr is absolute and the
// corresponding extent is used to map existing extent on disk.
// So search write_set first.
if (auto iter = write_set.find_offset(addr);
iter != write_set.end()) {
if (out)
*out = CachedExtentRef(&*iter);
SUBTRACET(seastore_cache, "{} is present in write_set -- {}",
*this, addr, *iter);
      assert(!out || (*out)->is_valid());
return get_extent_ret::PRESENT;
} else if (retired_set.count(addr)) {
return get_extent_ret::RETIRED;
} else if (
auto iter = read_set.find(addr);
iter != read_set.end()) {
// placeholder in read-set should be in the retired-set
// at the same time.
assert(iter->ref->get_type() != extent_types_t::RETIRED_PLACEHOLDER);
if (out)
*out = iter->ref;
SUBTRACET(seastore_cache, "{} is present in read_set -- {}",
*this, addr, *(iter->ref));
return get_extent_ret::PRESENT;
} else {
return get_extent_ret::ABSENT;
}
}
void add_to_retired_set(CachedExtentRef ref) {
ceph_assert(!is_weak());
if (ref->is_exist_clean() ||
ref->is_exist_mutation_pending()) {
existing_block_stats.dec(ref);
ref->set_invalid(*this);
write_set.erase(*ref);
} else if (ref->is_initial_pending()) {
ref->set_invalid(*this);
write_set.erase(*ref);
} else if (ref->is_mutation_pending()) {
ref->set_invalid(*this);
write_set.erase(*ref);
assert(ref->prior_instance);
retired_set.insert(ref->prior_instance);
assert(read_set.count(ref->prior_instance->get_paddr()));
ref->prior_instance.reset();
} else {
// && retired_set.count(ref->get_paddr()) == 0
// If it's already in the set, insert here will be a noop,
// which is what we want.
retired_set.insert(ref);
}
}
void add_to_read_set(CachedExtentRef ref) {
if (is_weak()) return;
assert(ref->is_valid());
auto it = ref->transactions.lower_bound(
this, read_set_item_t<Transaction>::trans_cmp_t());
if (it != ref->transactions.end() && it->t == this) return;
auto [iter, inserted] = read_set.emplace(this, ref);
ceph_assert(inserted);
ref->transactions.insert_before(
it, const_cast<read_set_item_t<Transaction>&>(*iter));
}
void add_fresh_extent(
CachedExtentRef ref) {
ceph_assert(!is_weak());
if (ref->is_exist_clean()) {
existing_block_stats.inc(ref);
existing_block_list.push_back(ref);
} else if (ref->get_paddr().is_delayed()) {
assert(ref->get_paddr() == make_delayed_temp_paddr(0));
assert(ref->is_logical());
ref->set_paddr(make_delayed_temp_paddr(delayed_temp_offset));
delayed_temp_offset += ref->get_length();
delayed_alloc_list.emplace_back(ref->cast<LogicalCachedExtent>());
fresh_block_stats.increment(ref->get_length());
} else if (ref->get_paddr().is_absolute()) {
pre_alloc_list.emplace_back(ref->cast<LogicalCachedExtent>());
fresh_block_stats.increment(ref->get_length());
} else {
if (likely(ref->get_paddr() == make_record_relative_paddr(0))) {
ref->set_paddr(make_record_relative_paddr(offset));
} else {
ceph_assert(ref->get_paddr().is_fake());
}
offset += ref->get_length();
inline_block_list.push_back(ref);
fresh_block_stats.increment(ref->get_length());
}
write_set.insert(*ref);
if (is_backref_node(ref->get_type()))
fresh_backref_extents++;
}
uint64_t get_num_fresh_backref() const {
return fresh_backref_extents;
}
void mark_delayed_extent_inline(LogicalCachedExtentRef& ref) {
write_set.erase(*ref);
assert(ref->get_paddr().is_delayed());
ref->set_paddr(make_record_relative_paddr(offset),
/* need_update_mapping: */ true);
offset += ref->get_length();
inline_block_list.push_back(ref);
write_set.insert(*ref);
}
void mark_delayed_extent_ool(LogicalCachedExtentRef& ref) {
written_ool_block_list.push_back(ref);
}
void update_delayed_ool_extent_addr(LogicalCachedExtentRef& ref,
paddr_t final_addr) {
write_set.erase(*ref);
assert(ref->get_paddr().is_delayed());
ref->set_paddr(final_addr, /* need_update_mapping: */ true);
assert(!ref->get_paddr().is_null());
assert(!ref->is_inline());
write_set.insert(*ref);
}
void mark_allocated_extent_ool(LogicalCachedExtentRef& ref) {
assert(ref->get_paddr().is_absolute());
assert(!ref->is_inline());
written_ool_block_list.push_back(ref);
}
void add_mutated_extent(CachedExtentRef ref) {
ceph_assert(!is_weak());
assert(ref->is_exist_mutation_pending() ||
read_set.count(ref->prior_instance->get_paddr()));
mutated_block_list.push_back(ref);
if (!ref->is_exist_mutation_pending()) {
write_set.insert(*ref);
} else {
assert(write_set.find_offset(ref->get_paddr()) !=
write_set.end());
}
}
void replace_placeholder(CachedExtent& placeholder, CachedExtent& extent) {
ceph_assert(!is_weak());
assert(placeholder.get_type() == extent_types_t::RETIRED_PLACEHOLDER);
assert(extent.get_type() != extent_types_t::RETIRED_PLACEHOLDER);
assert(extent.get_type() != extent_types_t::ROOT);
assert(extent.get_paddr() == placeholder.get_paddr());
{
auto where = read_set.find(placeholder.get_paddr());
assert(where != read_set.end());
assert(where->ref.get() == &placeholder);
where = read_set.erase(where);
auto it = read_set.emplace_hint(where, this, &extent);
extent.transactions.insert(const_cast<read_set_item_t<Transaction>&>(*it));
}
{
auto where = retired_set.find(&placeholder);
assert(where != retired_set.end());
assert(where->get() == &placeholder);
where = retired_set.erase(where);
retired_set.emplace_hint(where, &extent);
}
}
auto get_delayed_alloc_list() {
std::list<LogicalCachedExtentRef> ret;
for (auto& extent : delayed_alloc_list) {
// delayed extents may be invalidated
if (extent->is_valid()) {
ret.push_back(std::move(extent));
} else {
++num_delayed_invalid_extents;
}
}
delayed_alloc_list.clear();
return ret;
}
auto get_valid_pre_alloc_list() {
std::list<LogicalCachedExtentRef> ret;
assert(num_allocated_invalid_extents == 0);
for (auto& extent : pre_alloc_list) {
if (extent->is_valid()) {
ret.push_back(extent);
} else {
++num_allocated_invalid_extents;
}
}
return ret;
}
const auto &get_inline_block_list() {
return inline_block_list;
}
const auto &get_mutated_block_list() {
return mutated_block_list;
}
const auto &get_existing_block_list() {
return existing_block_list;
}
const auto &get_retired_set() {
return retired_set;
}
bool is_retired(paddr_t paddr, extent_len_t len) {
if (retired_set.empty()) {
return false;
}
auto iter = retired_set.lower_bound(paddr);
if (iter == retired_set.end() ||
(*iter)->get_paddr() > paddr) {
assert(iter != retired_set.begin());
--iter;
}
auto retired_paddr = (*iter)->get_paddr();
auto retired_length = (*iter)->get_length();
return retired_paddr <= paddr &&
retired_paddr.add_offset(retired_length) >= paddr.add_offset(len);
}
template <typename F>
auto for_each_fresh_block(F &&f) const {
std::for_each(written_ool_block_list.begin(), written_ool_block_list.end(), f);
std::for_each(inline_block_list.begin(), inline_block_list.end(), f);
}
const io_stat_t& get_fresh_block_stats() const {
return fresh_block_stats;
}
using src_t = transaction_type_t;
src_t get_src() const {
return src;
}
bool is_weak() const {
return weak;
}
void test_set_conflict() {
conflicted = true;
}
bool is_conflicted() const {
return conflicted;
}
auto &get_handle() {
return handle;
}
Transaction(
OrderingHandle &&handle,
bool weak,
src_t src,
journal_seq_t initiated_after,
on_destruct_func_t&& f,
transaction_id_t trans_id
) : weak(weak),
handle(std::move(handle)),
on_destruct(std::move(f)),
src(src),
trans_id(trans_id)
{}
void invalidate_clear_write_set() {
for (auto &&i: write_set) {
i.set_invalid(*this);
}
write_set.clear();
}
~Transaction() {
on_destruct(*this);
invalidate_clear_write_set();
}
friend class crimson::os::seastore::SeaStore;
friend class TransactionConflictCondition;
void reset_preserve_handle(journal_seq_t initiated_after) {
root.reset();
offset = 0;
delayed_temp_offset = 0;
read_set.clear();
fresh_backref_extents = 0;
invalidate_clear_write_set();
mutated_block_list.clear();
fresh_block_stats = {};
num_delayed_invalid_extents = 0;
num_allocated_invalid_extents = 0;
delayed_alloc_list.clear();
inline_block_list.clear();
written_ool_block_list.clear();
pre_alloc_list.clear();
retired_set.clear();
existing_block_list.clear();
existing_block_stats = {};
onode_tree_stats = {};
omap_tree_stats = {};
lba_tree_stats = {};
backref_tree_stats = {};
ool_write_stats = {};
rewrite_version_stats = {};
conflicted = false;
if (!has_reset) {
has_reset = true;
}
}
bool did_reset() const {
return has_reset;
}
struct tree_stats_t {
uint64_t depth = 0;
uint64_t num_inserts = 0;
uint64_t num_erases = 0;
uint64_t num_updates = 0;
int64_t extents_num_delta = 0;
bool is_clear() const {
return (depth == 0 &&
num_inserts == 0 &&
num_erases == 0 &&
num_updates == 0 &&
extents_num_delta == 0);
}
};
tree_stats_t& get_onode_tree_stats() {
return onode_tree_stats;
}
tree_stats_t& get_omap_tree_stats() {
return omap_tree_stats;
}
tree_stats_t& get_lba_tree_stats() {
return lba_tree_stats;
}
tree_stats_t& get_backref_tree_stats() {
return backref_tree_stats;
}
struct ool_write_stats_t {
io_stat_t extents;
uint64_t md_bytes = 0;
uint64_t num_records = 0;
uint64_t get_data_bytes() const {
return extents.bytes;
}
bool is_clear() const {
return (extents.is_clear() &&
md_bytes == 0 &&
num_records == 0);
}
};
ool_write_stats_t& get_ool_write_stats() {
return ool_write_stats;
}
version_stat_t& get_rewrite_version_stats() {
return rewrite_version_stats;
}
struct existing_block_stats_t {
uint64_t valid_num = 0;
uint64_t clean_num = 0;
uint64_t mutated_num = 0;
void inc(const CachedExtentRef &ref) {
valid_num++;
if (ref->is_exist_clean()) {
clean_num++;
} else {
mutated_num++;
}
}
void dec(const CachedExtentRef &ref) {
valid_num--;
if (ref->is_exist_clean()) {
clean_num--;
} else {
mutated_num--;
}
}
};
existing_block_stats_t& get_existing_block_stats() {
return existing_block_stats;
}
transaction_id_t get_trans_id() const {
return trans_id;
}
private:
friend class Cache;
friend Ref make_test_transaction();
/**
   * If set, *this may not be used to perform writes and will not provide
   * consistency, allowing operations using *this to avoid maintaining a read_set.
*/
const bool weak;
RootBlockRef root; ///< ref to root if read or written by transaction
device_off_t offset = 0; ///< relative offset of next block
device_off_t delayed_temp_offset = 0;
/**
* read_set
*
* Holds a reference (with a refcount) to every extent read via *this.
* Submitting a transaction mutating any contained extent/addr will
* invalidate *this.
*/
read_set_t<Transaction> read_set; ///< set of extents read by paddr
uint64_t fresh_backref_extents = 0; // counter of new backref extents
/**
* write_set
*
* Contains a reference (without a refcount) to every extent mutated
* as part of *this. No contained extent may be referenced outside
* of *this. Every contained extent will be in one of inline_block_list,
   * written_ool_block_list and/or pre_alloc_list, mutated_block_list,
* or delayed_alloc_list.
*/
ExtentIndex write_set;
/**
* lists of fresh blocks, holds refcounts, subset of write_set
*/
io_stat_t fresh_block_stats;
uint64_t num_delayed_invalid_extents = 0;
uint64_t num_allocated_invalid_extents = 0;
/// blocks that will be committed with journal record inline
std::list<CachedExtentRef> inline_block_list;
/// blocks that will be committed with out-of-line record
std::list<CachedExtentRef> written_ool_block_list;
/// blocks with delayed allocation, may become inline or ool above
std::list<LogicalCachedExtentRef> delayed_alloc_list;
/// Extents with pre-allocated addresses,
/// will be added to written_ool_block_list after write
std::list<LogicalCachedExtentRef> pre_alloc_list;
/// list of mutated blocks, holds refcounts, subset of write_set
std::list<CachedExtentRef> mutated_block_list;
/// partial blocks of extents on disk, with data and refcounts
std::list<CachedExtentRef> existing_block_list;
existing_block_stats_t existing_block_stats;
/**
* retire_set
*
* Set of extents retired by *this.
*/
pextent_set_t retired_set;
/// stats to collect when commit or invalidate
tree_stats_t onode_tree_stats;
tree_stats_t omap_tree_stats; // exclude omap tree depth
tree_stats_t lba_tree_stats;
tree_stats_t backref_tree_stats;
ool_write_stats_t ool_write_stats;
version_stat_t rewrite_version_stats;
bool conflicted = false;
bool has_reset = false;
OrderingHandle handle;
on_destruct_func_t on_destruct;
const src_t src;
transaction_id_t trans_id = TRANS_ID_NULL;
};
using TransactionRef = Transaction::Ref;
/// Should only be used with dummy staged-fltree node extent manager
inline TransactionRef make_test_transaction() {
static transaction_id_t next_id = 0;
return std::make_unique<Transaction>(
get_dummy_ordering_handle(),
false,
Transaction::src_t::MUTATE,
JOURNAL_SEQ_NULL,
[](Transaction&) {},
++next_id
);
}
struct TransactionConflictCondition {
class transaction_conflict final : public std::exception {
public:
const char* what() const noexcept final {
return "transaction conflict detected";
}
};
public:
TransactionConflictCondition(Transaction &t) : t(t) {}
template <typename Fut>
std::optional<Fut> may_interrupt() {
if (t.conflicted) {
return seastar::futurize<Fut>::make_exception_future(
transaction_conflict());
} else {
return std::optional<Fut>();
}
}
template <typename T>
static constexpr bool is_interruption_v =
std::is_same_v<T, transaction_conflict>;
static bool is_interruption(std::exception_ptr& eptr) {
return *eptr.__cxa_exception_type() == typeid(transaction_conflict);
}
private:
Transaction &t;
};
using trans_intr = crimson::interruptible::interruptor<
TransactionConflictCondition
>;
template <typename E>
using trans_iertr =
crimson::interruptible::interruptible_errorator<
TransactionConflictCondition,
E
>;
template <typename F, typename... Args>
auto with_trans_intr(Transaction &t, F &&f, Args&&... args) {
return trans_intr::with_interruption_to_error<crimson::ct_error::eagain>(
std::move(f),
TransactionConflictCondition(t),
t,
std::forward<Args>(args)...);
}
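/*
 * Usage sketch (illustration only, not part of the interface): errorators used
 * inside a transaction body are typically wrapped in trans_iertr so that a
 * conflict can interrupt the continuation chain, and with_trans_intr() turns
 * that interruption into crimson::ct_error::eagain for the caller to retry.
 * The names example_ertr, example_iertr and do_mutation are hypothetical.
 *
 *   using example_ertr = crimson::errorator<
 *     crimson::ct_error::input_output_error>;
 *   using example_iertr = trans_iertr<example_ertr>;
 *
 *   example_iertr::future<> do_mutation(Transaction &t);
 *
 *   auto fut = with_trans_intr(t, [](auto &t) {
 *     return do_mutation(t);  // surfaces as ct_error::eagain on conflict
 *   });
 */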
template <typename T>
using with_trans_ertr = typename T::base_ertr::template extend<crimson::ct_error::eagain>;
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::io_stat_t> : fmt::ostream_formatter {};
#endif
| 17,815 | 26.24159 | 96 | h |
null | ceph-main/src/crimson/os/seastore/transaction_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <optional>
#include <vector>
#include <utility>
#include <functional>
#include <boost/intrusive_ptr.hpp>
#include <boost/iterator/counting_iterator.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "include/ceph_assert.h"
#include "include/buffer.h"
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/lba_manager.h"
#include "crimson/os/seastore/backref_manager.h"
#include "crimson/os/seastore/journal.h"
#include "crimson/os/seastore/extent_placement_manager.h"
#include "crimson/os/seastore/device.h"
namespace crimson::os::seastore {
class Journal;
template <typename F>
auto repeat_eagain(F &&f) {
return seastar::do_with(
std::forward<F>(f),
[](auto &f)
{
return crimson::repeat([&f] {
return std::invoke(f
).safe_then([] {
return seastar::stop_iteration::yes;
}).handle_error(
[](const crimson::ct_error::eagain &e) {
return seastar::stop_iteration::no;
},
crimson::ct_error::pass_further_all{}
);
});
});
}
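/*
 * Usage sketch (illustration only; tm is a TransactionManager and
 * update_onode is a hypothetical interruptible operation): repeat_eagain()
 * re-runs the whole transaction body until it commits without hitting
 * ct_error::eagain, which with_trans_intr() produces whenever the transaction
 * is invalidated by a concurrent commit.
 *
 *   return repeat_eagain([&tm] {
 *     return seastar::do_with(
 *       tm.create_transaction(Transaction::src_t::MUTATE, "example"),
 *       [&tm](auto &tref) {
 *         return with_trans_intr(*tref, [&tm](auto &t) {
 *           return update_onode(tm, t
 *           ).si_then([&tm, &t] {
 *             return tm.submit_transaction(t);
 *           });
 *         });
 *       });
 *   });
 */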
/**
* TransactionManager
*
* Abstraction hiding reading and writing to persistence.
* Exposes transaction based interface with read isolation.
*/
class TransactionManager : public ExtentCallbackInterface {
public:
TransactionManager(
JournalRef journal,
CacheRef cache,
LBAManagerRef lba_manager,
ExtentPlacementManagerRef &&epm,
BackrefManagerRef&& backref_manager);
/// Writes initial metadata to disk
using mkfs_ertr = base_ertr;
mkfs_ertr::future<> mkfs();
/// Reads initial metadata from disk
using mount_ertr = base_ertr;
mount_ertr::future<> mount();
/// Closes transaction_manager
using close_ertr = base_ertr;
close_ertr::future<> close();
/// Resets transaction
void reset_transaction_preserve_handle(Transaction &t) {
return cache->reset_transaction_preserve_handle(t);
}
/**
* get_pin
*
* Get the logical pin at offset
*/
using get_pin_iertr = LBAManager::get_mapping_iertr;
using get_pin_ret = LBAManager::get_mapping_iertr::future<LBAMappingRef>;
get_pin_ret get_pin(
Transaction &t,
laddr_t offset) {
LOG_PREFIX(TransactionManager::get_pin);
SUBTRACET(seastore_tm, "{}", t, offset);
return lba_manager->get_mapping(t, offset);
}
/**
* get_pins
*
* Get logical pins overlapping offset~length
*/
using get_pins_iertr = LBAManager::get_mappings_iertr;
using get_pins_ret = get_pins_iertr::future<lba_pin_list_t>;
get_pins_ret get_pins(
Transaction &t,
laddr_t offset,
extent_len_t length) {
LOG_PREFIX(TransactionManager::get_pins);
SUBDEBUGT(seastore_tm, "{}~{}", t, offset, length);
return lba_manager->get_mappings(
t, offset, length);
}
/**
* read_extent
*
* Read extent of type T at offset~length
*/
using read_extent_iertr = get_pin_iertr;
template <typename T>
using read_extent_ret = read_extent_iertr::future<
TCachedExtentRef<T>>;
template <typename T>
read_extent_ret<T> read_extent(
Transaction &t,
laddr_t offset,
extent_len_t length) {
LOG_PREFIX(TransactionManager::read_extent);
SUBTRACET(seastore_tm, "{}~{}", t, offset, length);
return get_pin(
t, offset
).si_then([this, FNAME, &t, offset, length] (auto pin)
-> read_extent_ret<T> {
if (length != pin->get_length() || !pin->get_val().is_real()) {
SUBERRORT(seastore_tm,
"offset {} len {} got wrong pin {}",
t, offset, length, *pin);
ceph_assert(0 == "Should be impossible");
}
return this->read_pin<T>(t, std::move(pin));
});
}
/**
* read_extent
*
* Read extent of type T at offset
*/
template <typename T>
read_extent_ret<T> read_extent(
Transaction &t,
laddr_t offset) {
LOG_PREFIX(TransactionManager::read_extent);
SUBTRACET(seastore_tm, "{}", t, offset);
return get_pin(
t, offset
).si_then([this, FNAME, &t, offset] (auto pin)
-> read_extent_ret<T> {
if (!pin->get_val().is_real()) {
SUBERRORT(seastore_tm,
"offset {} got wrong pin {}",
t, offset, *pin);
ceph_assert(0 == "Should be impossible");
}
return this->read_pin<T>(t, std::move(pin));
});
}
template <typename T>
base_iertr::future<TCachedExtentRef<T>> read_pin(
Transaction &t,
LBAMappingRef pin)
{
auto v = pin->get_logical_extent(t);
if (v.has_child()) {
return v.get_child_fut().safe_then([](auto extent) {
return extent->template cast<T>();
});
} else {
return pin_to_extent<T>(t, std::move(pin));
}
}
base_iertr::future<LogicalCachedExtentRef> read_pin_by_type(
Transaction &t,
LBAMappingRef pin,
extent_types_t type)
{
auto v = pin->get_logical_extent(t);
if (v.has_child()) {
return std::move(v.get_child_fut());
} else {
return pin_to_extent_by_type(t, std::move(pin), type);
}
}
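  /*
   * Usage sketch (illustration only; tm, t and ObjectDataBlock stand in for a
   * TransactionManager, an open transaction and a logical extent type): a
   * typical read path looks up the pins covering a logical range and then
   * resolves each pin to a cached extent via read_pin().
   *
   *   return tm.get_pins(t, offset, length
   *   ).si_then([&tm, &t](lba_pin_list_t pins) {
   *     return seastar::do_with(
   *       std::move(pins),
   *       [&tm, &t](auto &pins) {
   *         return trans_intr::do_for_each(
   *           pins.begin(), pins.end(),
   *           [&tm, &t](LBAMappingRef &pin) {
   *             return tm.read_pin<ObjectDataBlock>(t, std::move(pin)
   *             ).si_then([](auto extent) {
   *               // extent->get_bptr() now holds the data
   *             });
   *           });
   *       });
   *   });
   */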
/// Obtain mutable copy of extent
LogicalCachedExtentRef get_mutable_extent(Transaction &t, LogicalCachedExtentRef ref) {
LOG_PREFIX(TransactionManager::get_mutable_extent);
auto ret = cache->duplicate_for_write(
t,
ref)->cast<LogicalCachedExtent>();
if (!ret->has_laddr()) {
SUBDEBUGT(seastore_tm,
"duplicating extent for write -- {} -> {}",
t,
*ref,
*ret);
ret->set_laddr(ref->get_laddr());
} else {
SUBTRACET(seastore_tm,
"extent is already duplicated -- {}",
t,
*ref);
assert(ref->is_mutable());
assert(&*ref == &*ret);
}
return ret;
}
using ref_iertr = LBAManager::ref_iertr;
using ref_ret = ref_iertr::future<unsigned>;
/// Add refcount for ref
ref_ret inc_ref(
Transaction &t,
LogicalCachedExtentRef &ref);
/// Add refcount for offset
ref_ret inc_ref(
Transaction &t,
laddr_t offset);
/// Remove refcount for ref
ref_ret dec_ref(
Transaction &t,
LogicalCachedExtentRef &ref);
/// Remove refcount for offset
ref_ret dec_ref(
Transaction &t,
laddr_t offset);
/// remove refcount for list of offset
using refs_ret = ref_iertr::future<std::vector<unsigned>>;
refs_ret dec_ref(
Transaction &t,
std::vector<laddr_t> offsets);
/**
* alloc_extent
*
   * Allocates a new block of type T, placed at the lowest lba range of size
   * len greater than laddr_hint.
*/
using alloc_extent_iertr = LBAManager::alloc_extent_iertr;
template <typename T>
using alloc_extent_ret = alloc_extent_iertr::future<TCachedExtentRef<T>>;
template <typename T>
alloc_extent_ret<T> alloc_extent(
Transaction &t,
laddr_t laddr_hint,
extent_len_t len,
placement_hint_t placement_hint = placement_hint_t::HOT) {
LOG_PREFIX(TransactionManager::alloc_extent);
SUBTRACET(seastore_tm, "{} len={}, placement_hint={}, laddr_hint={}",
t, T::TYPE, len, placement_hint, laddr_hint);
ceph_assert(is_aligned(laddr_hint, epm->get_block_size()));
auto ext = cache->alloc_new_extent<T>(
t,
len,
placement_hint,
INIT_GENERATION);
return lba_manager->alloc_extent(
t,
laddr_hint,
len,
ext->get_paddr(),
ext.get()
).si_then([ext=std::move(ext), laddr_hint, &t](auto &&) mutable {
LOG_PREFIX(TransactionManager::alloc_extent);
SUBDEBUGT(seastore_tm, "new extent: {}, laddr_hint: {}", t, *ext, laddr_hint);
return alloc_extent_iertr::make_ready_future<TCachedExtentRef<T>>(
std::move(ext));
});
}
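  /*
   * Usage sketch (illustration only; tm, t and TestBlock are stand-ins): a
   * freshly allocated extent is mutable in memory and gets persisted when the
   * transaction commits; laddr_hint only guides where the lba range is
   * reserved.
   *
   *   return tm.alloc_extent<TestBlock>(t, laddr_hint, 4096
   *   ).si_then([](auto extent) {
   *     ::memset(extent->get_bptr().c_str(), 0, extent->get_length());
   *     return seastar::now();
   *   });
   */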
/**
* remap_pin
*
* Remap original extent to new extents.
* Return the pins of new extent.
*/
struct remap_entry {
extent_len_t offset;
extent_len_t len;
remap_entry(extent_len_t _offset, extent_len_t _len) {
offset = _offset;
len = _len;
}
};
using remap_pin_iertr = base_iertr;
template <std::size_t N>
using remap_pin_ret = remap_pin_iertr::future<std::array<LBAMappingRef, N>>;
template <typename T, std::size_t N>
remap_pin_ret<N> remap_pin(
Transaction &t,
LBAMappingRef &&pin,
std::array<remap_entry, N> remaps) {
#ifndef NDEBUG
std::sort(remaps.begin(), remaps.end(),
[](remap_entry x, remap_entry y) {
return x.offset < y.offset;
});
auto original_len = pin->get_length();
extent_len_t total_remap_len = 0;
extent_len_t last_offset = 0;
extent_len_t last_len = 0;
for (auto &remap : remaps) {
auto remap_offset = remap.offset;
auto remap_len = remap.len;
total_remap_len += remap.len;
ceph_assert(remap_offset >= (last_offset + last_len));
last_offset = remap_offset;
last_len = remap_len;
}
ceph_assert(total_remap_len < original_len);
#endif
// FIXME: paddr can be absolute and pending
ceph_assert(pin->get_val().is_absolute());
return cache->get_extent_if_cached(
t, pin->get_val(), T::TYPE
).si_then([this, &t, remaps,
original_laddr = pin->get_key(),
original_paddr = pin->get_val(),
original_len = pin->get_length()](auto ext) {
std::optional<ceph::bufferptr> original_bptr;
LOG_PREFIX(TransactionManager::remap_pin);
SUBDEBUGT(seastore_tm,
"original laddr: {}, original paddr: {}, original length: {},"
" remap to {} extents",
t, original_laddr, original_paddr, original_len, remaps.size());
if (ext) {
// FIXME: cannot and will not remap a dirty extent for now.
ceph_assert(!ext->is_dirty());
ceph_assert(!ext->is_mutable());
ceph_assert(ext->get_length() == original_len);
original_bptr = ext->get_bptr();
}
return seastar::do_with(
std::array<LBAMappingRef, N>(),
0,
std::move(original_bptr),
std::vector<remap_entry>(remaps.begin(), remaps.end()),
[this, &t, original_laddr, original_paddr, original_len]
(auto &ret, auto &count, auto &original_bptr, auto &remaps) {
return dec_ref(t, original_laddr
).si_then([this, &t, &original_bptr, &ret, &count, &remaps,
original_laddr, original_paddr, original_len](auto) {
return trans_intr::do_for_each(
remaps.begin(),
remaps.end(),
[this, &t, &original_bptr, &ret, &count,
original_laddr, original_paddr, original_len](auto &remap) {
LOG_PREFIX(TransactionManager::remap_pin);
auto remap_offset = remap.offset;
auto remap_len = remap.len;
auto remap_laddr = original_laddr + remap_offset;
auto remap_paddr = original_paddr.add_offset(remap_offset);
ceph_assert(remap_len < original_len);
ceph_assert(remap_offset + remap_len <= original_len);
ceph_assert(remap_len != 0);
ceph_assert(remap_offset % cache->get_block_size() == 0);
ceph_assert(remap_len % cache->get_block_size() == 0);
SUBDEBUGT(seastore_tm,
"remap laddr: {}, remap paddr: {}, remap length: {}", t,
remap_laddr, remap_paddr, remap_len);
return alloc_remapped_extent<T>(
t,
remap_laddr,
remap_paddr,
remap_len,
original_laddr,
std::move(original_bptr)
).si_then([&ret, &count, remap_laddr](auto &&npin) {
ceph_assert(npin->get_key() == remap_laddr);
ret[count++] = std::move(npin);
});
});
}).handle_error_interruptible(
remap_pin_iertr::pass_further{},
crimson::ct_error::assert_all{
"TransactionManager::remap_pin hit invalid error"
}
).si_then([&ret, &count] {
ceph_assert(count == N);
return remap_pin_iertr::make_ready_future<
std::array<LBAMappingRef, N>>(std::move(ret));
});
});
});
}
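  /*
   * Usage sketch (illustration only; tm, t, pin and ObjectDataBlock are
   * stand-ins): splitting one mapping in two, e.g. to punch a hole in the
   * middle of an extent, keeps the head and the tail and drops the region in
   * between. Offsets and lengths must stay block-aligned and must not
   * overlap, and the remapped total must be smaller than the original.
   *
   *   std::array<TransactionManager::remap_entry, 2> remaps{
   *     TransactionManager::remap_entry(0, head_len),
   *     TransactionManager::remap_entry(hole_end, orig_len - hole_end)};
   *   return tm.remap_pin<ObjectDataBlock, 2>(t, std::move(pin), remaps
   *   ).si_then([](auto new_pins) {
   *     // new_pins[0] covers the head, new_pins[1] covers the tail
   *   });
   */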
using reserve_extent_iertr = alloc_extent_iertr;
using reserve_extent_ret = reserve_extent_iertr::future<LBAMappingRef>;
reserve_extent_ret reserve_region(
Transaction &t,
laddr_t hint,
extent_len_t len) {
LOG_PREFIX(TransactionManager::reserve_region);
SUBDEBUGT(seastore_tm, "len={}, laddr_hint={}", t, len, hint);
ceph_assert(is_aligned(hint, epm->get_block_size()));
return lba_manager->alloc_extent(
t,
hint,
len,
P_ADDR_ZERO,
nullptr);
}
/* alloc_extents
*
   * Allocates multiple new blocks of type T.
*/
using alloc_extents_iertr = alloc_extent_iertr;
template<class T>
alloc_extents_iertr::future<std::vector<TCachedExtentRef<T>>>
alloc_extents(
Transaction &t,
laddr_t hint,
extent_len_t len,
int num) {
LOG_PREFIX(TransactionManager::alloc_extents);
SUBDEBUGT(seastore_tm, "len={}, laddr_hint={}, num={}",
t, len, hint, num);
return seastar::do_with(std::vector<TCachedExtentRef<T>>(),
[this, &t, hint, len, num] (auto &extents) {
return trans_intr::do_for_each(
boost::make_counting_iterator(0),
boost::make_counting_iterator(num),
[this, &t, len, hint, &extents] (auto i) {
return alloc_extent<T>(t, hint, len).si_then(
[&extents](auto &&node) {
extents.push_back(node);
});
}).si_then([&extents] {
return alloc_extents_iertr::make_ready_future
<std::vector<TCachedExtentRef<T>>>(std::move(extents));
});
});
}
/**
* submit_transaction
*
* Atomically submits transaction to persistence
*/
using submit_transaction_iertr = base_iertr;
submit_transaction_iertr::future<> submit_transaction(Transaction &);
/**
* flush
*
* Block until all outstanding IOs on handle are committed.
* Note, flush() machinery must go through the same pipeline
* stages and locks as submit_transaction.
*/
seastar::future<> flush(OrderingHandle &handle);
/*
* ExtentCallbackInterface
*/
/// weak transaction should be type READ
TransactionRef create_transaction(
Transaction::src_t src,
const char* name,
bool is_weak=false) final {
return cache->create_transaction(src, name, is_weak);
}
using ExtentCallbackInterface::submit_transaction_direct_ret;
submit_transaction_direct_ret submit_transaction_direct(
Transaction &t,
std::optional<journal_seq_t> seq_to_trim = std::nullopt) final;
using ExtentCallbackInterface::get_next_dirty_extents_ret;
get_next_dirty_extents_ret get_next_dirty_extents(
Transaction &t,
journal_seq_t seq,
size_t max_bytes) final;
using ExtentCallbackInterface::rewrite_extent_ret;
rewrite_extent_ret rewrite_extent(
Transaction &t,
CachedExtentRef extent,
rewrite_gen_t target_generation,
sea_time_point modify_time) final;
using ExtentCallbackInterface::get_extents_if_live_ret;
get_extents_if_live_ret get_extents_if_live(
Transaction &t,
extent_types_t type,
paddr_t paddr,
laddr_t laddr,
extent_len_t len) final;
/**
* read_root_meta
*
* Read root block meta entry for key.
*/
using read_root_meta_iertr = base_iertr;
using read_root_meta_bare = std::optional<std::string>;
using read_root_meta_ret = read_root_meta_iertr::future<
read_root_meta_bare>;
read_root_meta_ret read_root_meta(
Transaction &t,
const std::string &key) {
return cache->get_root(
t
).si_then([&key, &t](auto root) {
LOG_PREFIX(TransactionManager::read_root_meta);
auto meta = root->root.get_meta();
auto iter = meta.find(key);
if (iter == meta.end()) {
SUBDEBUGT(seastore_tm, "{} -> nullopt", t, key);
return seastar::make_ready_future<read_root_meta_bare>(std::nullopt);
} else {
SUBDEBUGT(seastore_tm, "{} -> {}", t, key, iter->second);
return seastar::make_ready_future<read_root_meta_bare>(iter->second);
}
});
}
/**
* update_root_meta
*
* Update root block meta entry for key to value.
*/
using update_root_meta_iertr = base_iertr;
using update_root_meta_ret = update_root_meta_iertr::future<>;
update_root_meta_ret update_root_meta(
Transaction& t,
const std::string& key,
const std::string& value) {
LOG_PREFIX(TransactionManager::update_root_meta);
SUBDEBUGT(seastore_tm, "seastore_tm, {} -> {}", t, key, value);
return cache->get_root(
t
).si_then([this, &t, &key, &value](RootBlockRef root) {
root = cache->duplicate_for_write(t, root)->cast<RootBlock>();
auto meta = root->root.get_meta();
meta[key] = value;
root->root.set_meta(meta);
return seastar::now();
});
}
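  /*
   * Usage sketch (illustration only; the key and value are made up): the root
   * meta map is a small string-to-string store kept in the root block, handy
   * for bootstrap values that must be reachable without any other structure.
   *
   *   return tm.update_root_meta(t, "example_key", "42"
   *   ).si_then([&tm, &t] {
   *     return tm.read_root_meta(t, "example_key");
   *   }).si_then([](auto maybe_value) {
   *     // maybe_value holds std::optional<std::string>("42")
   *   });
   */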
/**
* read_onode_root
*
* Get onode-tree root logical address
*/
using read_onode_root_iertr = base_iertr;
using read_onode_root_ret = read_onode_root_iertr::future<laddr_t>;
read_onode_root_ret read_onode_root(Transaction &t) {
return cache->get_root(t).si_then([&t](auto croot) {
LOG_PREFIX(TransactionManager::read_onode_root);
laddr_t ret = croot->get_root().onode_root;
SUBTRACET(seastore_tm, "{}", t, ret);
return ret;
});
}
/**
* write_onode_root
*
* Write onode-tree root logical address, must be called after read.
*/
void write_onode_root(Transaction &t, laddr_t addr) {
LOG_PREFIX(TransactionManager::write_onode_root);
SUBDEBUGT(seastore_tm, "{}", t, addr);
auto croot = cache->get_root_fast(t);
croot = cache->duplicate_for_write(t, croot)->cast<RootBlock>();
croot->get_root().onode_root = addr;
}
/**
* read_collection_root
*
* Get collection root addr
*/
using read_collection_root_iertr = base_iertr;
using read_collection_root_ret = read_collection_root_iertr::future<
coll_root_t>;
read_collection_root_ret read_collection_root(Transaction &t) {
return cache->get_root(t).si_then([&t](auto croot) {
LOG_PREFIX(TransactionManager::read_collection_root);
auto ret = croot->get_root().collection_root.get();
SUBTRACET(seastore_tm, "{}~{}",
t, ret.get_location(), ret.get_size());
return ret;
});
}
/**
* write_collection_root
*
* Update collection root addr
*/
void write_collection_root(Transaction &t, coll_root_t cmroot) {
LOG_PREFIX(TransactionManager::write_collection_root);
SUBDEBUGT(seastore_tm, "{}~{}",
t, cmroot.get_location(), cmroot.get_size());
auto croot = cache->get_root_fast(t);
croot = cache->duplicate_for_write(t, croot)->cast<RootBlock>();
croot->get_root().collection_root.update(cmroot);
}
extent_len_t get_block_size() const {
return epm->get_block_size();
}
store_statfs_t store_stat() const {
return epm->get_stat();
}
~TransactionManager();
private:
friend class Transaction;
CacheRef cache;
LBAManagerRef lba_manager;
JournalRef journal;
ExtentPlacementManagerRef epm;
BackrefManagerRef backref_manager;
WritePipeline write_pipeline;
rewrite_extent_ret rewrite_logical_extent(
Transaction& t,
LogicalCachedExtentRef extent);
submit_transaction_direct_ret do_submit_transaction(
Transaction &t,
ExtentPlacementManager::dispatch_result_t dispatch_result,
std::optional<journal_seq_t> seq_to_trim = std::nullopt);
/**
* pin_to_extent
*
* Get extent mapped at pin.
*/
using pin_to_extent_iertr = base_iertr;
template <typename T>
using pin_to_extent_ret = pin_to_extent_iertr::future<
TCachedExtentRef<T>>;
template <typename T>
pin_to_extent_ret<T> pin_to_extent(
Transaction &t,
LBAMappingRef pin) {
LOG_PREFIX(TransactionManager::pin_to_extent);
SUBTRACET(seastore_tm, "getting extent {}", t, *pin);
static_assert(is_logical_type(T::TYPE));
using ret = pin_to_extent_ret<T>;
auto &pref = *pin;
return cache->get_absent_extent<T>(
t,
pref.get_val(),
pref.get_length(),
[pin=std::move(pin)]
(T &extent) mutable {
assert(!extent.has_laddr());
assert(!extent.has_been_invalidated());
assert(!pin->has_been_invalidated());
assert(pin->get_parent());
pin->link_child(&extent);
extent.set_laddr(pin->get_key());
}
).si_then([FNAME, &t](auto ref) mutable -> ret {
SUBTRACET(seastore_tm, "got extent -- {}", t, *ref);
assert(ref->is_fully_loaded());
return pin_to_extent_ret<T>(
interruptible::ready_future_marker{},
std::move(ref));
});
}
/**
* pin_to_extent_by_type
*
* Get extent mapped at pin.
*/
using pin_to_extent_by_type_ret = pin_to_extent_iertr::future<
LogicalCachedExtentRef>;
pin_to_extent_by_type_ret pin_to_extent_by_type(
Transaction &t,
LBAMappingRef pin,
extent_types_t type)
{
LOG_PREFIX(TransactionManager::pin_to_extent_by_type);
SUBTRACET(seastore_tm, "getting extent {} type {}", t, *pin, type);
assert(is_logical_type(type));
auto &pref = *pin;
return cache->get_absent_extent_by_type(
t,
type,
pref.get_val(),
pref.get_key(),
pref.get_length(),
[pin=std::move(pin)](CachedExtent &extent) mutable {
auto &lextent = static_cast<LogicalCachedExtent&>(extent);
assert(!lextent.has_laddr());
assert(!lextent.has_been_invalidated());
assert(!pin->has_been_invalidated());
assert(pin->get_parent());
assert(!pin->get_parent()->is_pending());
pin->link_child(&lextent);
lextent.set_laddr(pin->get_key());
}
).si_then([FNAME, &t](auto ref) {
SUBTRACET(seastore_tm, "got extent -- {}", t, *ref);
assert(ref->is_fully_loaded());
return pin_to_extent_by_type_ret(
interruptible::ready_future_marker{},
std::move(ref->template cast<LogicalCachedExtent>()));
});
}
/**
* alloc_remapped_extent
*
* Allocates a new extent at given remap_paddr that must be absolute and
* use the buffer to fill the new extent if buffer exists. Otherwise, will
* not read disk to fill the new extent.
* Returns the new extent.
*
* Should make sure the end laddr of remap extent <= the end laddr of
* original extent when using this method.
*/
using alloc_remapped_extent_iertr =
alloc_extent_iertr::extend_ertr<Device::read_ertr>;
using alloc_remapped_extent_ret =
alloc_remapped_extent_iertr::future<LBAMappingRef>;
template <typename T>
alloc_remapped_extent_ret alloc_remapped_extent(
Transaction &t,
laddr_t remap_laddr,
paddr_t remap_paddr,
extent_len_t remap_length,
laddr_t original_laddr,
std::optional<ceph::bufferptr> &&original_bptr) {
LOG_PREFIX(TransactionManager::alloc_remapped_extent);
SUBDEBUG(seastore_tm, "alloc remapped extent: remap_laddr: {}, "
"remap_paddr: {}, remap_length: {}, has data in cache: {} ",
remap_laddr, remap_paddr, remap_length,
original_bptr.has_value() ? "true":"false");
auto ext = cache->alloc_remapped_extent<T>(
t,
remap_laddr,
remap_paddr,
remap_length,
original_laddr,
std::move(original_bptr));
return lba_manager->alloc_extent(
t,
remap_laddr,
remap_length,
remap_paddr,
ext.get()
).si_then([remap_laddr, remap_length, remap_paddr](auto &&ref) {
assert(ref->get_key() == remap_laddr);
assert(ref->get_val() == remap_paddr);
assert(ref->get_length() == remap_length);
return alloc_remapped_extent_iertr::make_ready_future
<LBAMappingRef>(std::move(ref));
});
}
public:
// Testing interfaces
auto get_epm() {
return epm.get();
}
auto get_lba_manager() {
return lba_manager.get();
}
auto get_backref_manager() {
return backref_manager.get();
}
auto get_cache() {
return cache.get();
}
auto get_journal() {
return journal.get();
}
};
using TransactionManagerRef = std::unique_ptr<TransactionManager>;
TransactionManagerRef make_transaction_manager(
Device *primary_device,
const std::vector<Device*> &secondary_devices,
bool is_test);
}
| 24,436 | 28.764921 | 89 | h |
null | ceph-main/src/crimson/os/seastore/backref/backref_tree_node.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/btree/fixed_kv_node.h"
namespace crimson::os::seastore::backref {
using backref_node_meta_t = fixed_kv_node_meta_t<paddr_t>;
using backref_node_meta_le_t = fixed_kv_node_meta_le_t<paddr_t>;
constexpr size_t INTERNAL_NODE_CAPACITY = 254;
constexpr size_t LEAF_NODE_CAPACITY = 169;
using BackrefNode = FixedKVNode<paddr_t>;
struct backref_map_val_t {
extent_len_t len = 0; ///< length of extents
laddr_t laddr = 0; ///< logical address of extents
extent_types_t type = extent_types_t::ROOT;
backref_map_val_t() = default;
backref_map_val_t(
extent_len_t len,
laddr_t laddr,
extent_types_t type)
: len(len), laddr(laddr), type(type) {}
bool operator==(const backref_map_val_t& rhs) const noexcept {
return len == rhs.len && laddr == rhs.laddr;
}
};
std::ostream& operator<<(std::ostream &out, const backref_map_val_t& val);
struct backref_map_val_le_t {
extent_len_le_t len = init_extent_len_le(0);
laddr_le_t laddr = laddr_le_t(0);
extent_types_le_t type = 0;
backref_map_val_le_t() = default;
backref_map_val_le_t(const backref_map_val_le_t &) = default;
explicit backref_map_val_le_t(const backref_map_val_t &val)
: len(init_extent_len_le(val.len)),
laddr(val.laddr),
type(extent_types_le_t(val.type)) {}
operator backref_map_val_t() const {
return backref_map_val_t{len, laddr, (extent_types_t)type};
}
};
class BackrefInternalNode
: public FixedKVInternalNode<
INTERNAL_NODE_CAPACITY,
paddr_t, paddr_le_t,
BACKREF_NODE_SIZE,
BackrefInternalNode> {
public:
template <typename... T>
BackrefInternalNode(T&&... t) :
FixedKVInternalNode(std::forward<T>(t)...) {}
static constexpr extent_types_t TYPE = extent_types_t::BACKREF_INTERNAL;
extent_types_t get_type() const final {
return TYPE;
}
};
using BackrefInternalNodeRef = BackrefInternalNode::Ref;
class BackrefLeafNode
: public FixedKVLeafNode<
LEAF_NODE_CAPACITY,
paddr_t, paddr_le_t,
backref_map_val_t, backref_map_val_le_t,
BACKREF_NODE_SIZE,
BackrefLeafNode,
false> {
public:
template <typename... T>
BackrefLeafNode(T&&... t) :
FixedKVLeafNode(std::forward<T>(t)...) {}
static constexpr extent_types_t TYPE = extent_types_t::BACKREF_LEAF;
extent_types_t get_type() const final {
return TYPE;
}
const_iterator insert(
const_iterator iter,
paddr_t key,
backref_map_val_t val,
LogicalCachedExtent*) final {
journal_insert(
iter,
key,
val,
maybe_get_delta_buffer());
return iter;
}
void update(
const_iterator iter,
backref_map_val_t val,
LogicalCachedExtent*) final {
return journal_update(
iter,
val,
maybe_get_delta_buffer());
}
void remove(const_iterator iter) final {
return journal_remove(
iter,
maybe_get_delta_buffer());
}
// backref leaf nodes don't have to resolve relative addresses
void resolve_relative_addrs(paddr_t base) final {}
void node_resolve_vals(iterator from, iterator to) const final {}
void node_unresolve_vals(iterator from, iterator to) const final {}
};
using BackrefLeafNodeRef = BackrefLeafNode::Ref;
} // namespace crimson::os::seastore::backref
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::backref::backref_map_val_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::backref::BackrefInternalNode> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::backref::BackrefLeafNode> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::backref::backref_node_meta_t> : fmt::ostream_formatter {};
#endif
| 3,877 | 27.101449 | 115 | h |
null | ceph-main/src/crimson/os/seastore/backref/btree_backref_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/backref_manager.h"
#include "crimson/os/seastore/backref/backref_tree_node.h"
#include "crimson/os/seastore/btree/fixed_kv_btree.h"
namespace crimson::os::seastore::backref {
constexpr size_t BACKREF_BLOCK_SIZE = 4096;
class BtreeBackrefMapping : public BtreeNodeMapping<paddr_t, laddr_t> {
extent_types_t type;
public:
BtreeBackrefMapping(op_context_t<paddr_t> ctx)
: BtreeNodeMapping(ctx) {}
BtreeBackrefMapping(
op_context_t<paddr_t> ctx,
CachedExtentRef parent,
uint16_t pos,
backref_map_val_t &val,
backref_node_meta_t &&meta)
: BtreeNodeMapping(
ctx,
parent,
pos,
val.laddr,
val.len,
std::forward<backref_node_meta_t>(meta)),
type(val.type)
{}
extent_types_t get_type() const final {
return type;
}
};
using BackrefBtree = FixedKVBtree<
paddr_t, backref_map_val_t, BackrefInternalNode,
BackrefLeafNode, BtreeBackrefMapping, BACKREF_BLOCK_SIZE, false>;
class BtreeBackrefManager : public BackrefManager {
public:
BtreeBackrefManager(Cache &cache)
: cache(cache)
{}
mkfs_ret mkfs(
Transaction &t) final;
get_mapping_ret get_mapping(
Transaction &t,
paddr_t offset) final;
get_mappings_ret get_mappings(
Transaction &t,
paddr_t offset,
paddr_t end) final;
new_mapping_ret new_mapping(
Transaction &t,
paddr_t key,
extent_len_t len,
laddr_t val,
extent_types_t type) final;
merge_cached_backrefs_ret merge_cached_backrefs(
Transaction &t,
const journal_seq_t &limit,
const uint64_t max) final;
remove_mapping_ret remove_mapping(
Transaction &t,
paddr_t offset) final;
check_child_trackers_ret check_child_trackers(Transaction &t) final;
scan_mapped_space_ret scan_mapped_space(
Transaction &t,
scan_mapped_space_func_t &&f) final;
init_cached_extent_ret init_cached_extent(
Transaction &t,
CachedExtentRef e) final;
rewrite_extent_ret rewrite_extent(
Transaction &t,
CachedExtentRef extent) final;
Cache::backref_entry_query_mset_t
get_cached_backref_entries_in_range(
paddr_t start,
paddr_t end) final;
retrieve_backref_extents_in_range_ret
retrieve_backref_extents_in_range(
Transaction &t,
paddr_t start,
paddr_t end) final;
void cache_new_backref_extent(
paddr_t paddr,
paddr_t key,
extent_types_t type) final;
private:
Cache &cache;
op_context_t<paddr_t> get_context(Transaction &t) {
return op_context_t<paddr_t>{cache, t};
}
};
} // namespace crimson::os::seastore::backref
| 2,677 | 21.888889 | 71 | h |
null | ceph-main/src/crimson/os/seastore/btree/btree_range_pin.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/intrusive/set.hpp>
#include "crimson/common/log.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/seastore_types.h"
namespace crimson::os::seastore {
template <typename node_key_t>
struct op_context_t {
Cache &cache;
Transaction &trans;
};
constexpr uint16_t MAX_FIXEDKVBTREE_DEPTH = 8;
template <typename T>
struct min_max_t {};
template <>
struct min_max_t<laddr_t> {
static constexpr laddr_t max = L_ADDR_MAX;
static constexpr laddr_t min = L_ADDR_MIN;
};
template <>
struct min_max_t<paddr_t> {
static constexpr paddr_t max = P_ADDR_MAX;
static constexpr paddr_t min = P_ADDR_MIN;
};
template <typename bound_t>
struct fixed_kv_node_meta_t {
bound_t begin = min_max_t<bound_t>::min;
bound_t end = min_max_t<bound_t>::min;
depth_t depth = 0;
bool is_parent_of(const fixed_kv_node_meta_t &other) const {
return (depth == other.depth + 1) &&
(begin <= other.begin) &&
(end > other.begin);
}
bool is_in_range(const bound_t key) const {
return begin <= key && end > key;
}
std::pair<fixed_kv_node_meta_t, fixed_kv_node_meta_t> split_into(bound_t pivot) const {
return std::make_pair(
fixed_kv_node_meta_t{begin, pivot, depth},
fixed_kv_node_meta_t{pivot, end, depth});
}
static fixed_kv_node_meta_t merge_from(
const fixed_kv_node_meta_t &lhs, const fixed_kv_node_meta_t &rhs) {
ceph_assert(lhs.depth == rhs.depth);
return fixed_kv_node_meta_t{lhs.begin, rhs.end, lhs.depth};
}
static std::pair<fixed_kv_node_meta_t, fixed_kv_node_meta_t>
rebalance(const fixed_kv_node_meta_t &lhs, const fixed_kv_node_meta_t &rhs, bound_t pivot) {
ceph_assert(lhs.depth == rhs.depth);
return std::make_pair(
fixed_kv_node_meta_t{lhs.begin, pivot, lhs.depth},
fixed_kv_node_meta_t{pivot, rhs.end, lhs.depth});
}
bool is_root() const {
return begin == min_max_t<bound_t>::min && end == min_max_t<bound_t>::max;
}
};
template <typename bound_t>
inline std::ostream &operator<<(
std::ostream &lhs,
const fixed_kv_node_meta_t<bound_t> &rhs)
{
return lhs << "btree_node_meta_t("
<< "begin=" << rhs.begin
<< ", end=" << rhs.end
<< ", depth=" << rhs.depth
<< ")";
}
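/*
 * Worked example (illustration only): metadata ranges behave like half-open
 * intervals [begin, end) at a given depth. Splitting at a pivot and merging
 * the two halves back are exact inverses:
 *
 *   fixed_kv_node_meta_t<laddr_t> m{0x1000, 0x3000, 1};
 *   auto [left, right] = m.split_into(0x2000);
 *   // left == {0x1000, 0x2000, 1}, right == {0x2000, 0x3000, 1}
 *   auto merged = fixed_kv_node_meta_t<laddr_t>::merge_from(left, right);
 *   // merged == {0x1000, 0x3000, 1}; merged.is_in_range(0x2fff) is true
 */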
/**
* fixed_kv_node_meta_le_t
*
* On disk layout for fixed_kv_node_meta_t
*/
template <typename bound_le_t>
struct fixed_kv_node_meta_le_t {
bound_le_t begin = bound_le_t(0);
bound_le_t end = bound_le_t(0);
depth_le_t depth = init_depth_le(0);
fixed_kv_node_meta_le_t() = default;
fixed_kv_node_meta_le_t(
const fixed_kv_node_meta_le_t<bound_le_t> &) = default;
explicit fixed_kv_node_meta_le_t(
const fixed_kv_node_meta_t<typename bound_le_t::orig_type> &val)
: begin(val.begin),
end(val.end),
depth(init_depth_le(val.depth)) {}
operator fixed_kv_node_meta_t<typename bound_le_t::orig_type>() const {
return fixed_kv_node_meta_t<typename bound_le_t::orig_type>{
begin, end, depth };
}
};
template <typename key_t, typename val_t>
class BtreeNodeMapping : public PhysicalNodeMapping<key_t, val_t> {
op_context_t<key_t> ctx;
/**
* parent
*
* populated until link_extent is called to ensure cache residence
* until add_pin is called.
*/
CachedExtentRef parent;
val_t value;
extent_len_t len;
fixed_kv_node_meta_t<key_t> range;
uint16_t pos = std::numeric_limits<uint16_t>::max();
public:
using val_type = val_t;
BtreeNodeMapping(op_context_t<key_t> ctx) : ctx(ctx) {}
BtreeNodeMapping(
op_context_t<key_t> ctx,
CachedExtentRef parent,
uint16_t pos,
val_t &value,
extent_len_t len,
fixed_kv_node_meta_t<key_t> &&meta)
: ctx(ctx),
parent(parent),
value(value),
len(len),
range(std::move(meta)),
pos(pos)
{
if (!parent->is_pending()) {
this->child_pos = {parent, pos};
}
}
CachedExtentRef get_parent() const final {
return parent;
}
CachedExtentRef get_parent() {
return parent;
}
void set_parent(CachedExtentRef ext) {
parent = ext;
}
uint16_t get_pos() const final {
return pos;
}
extent_len_t get_length() const final {
ceph_assert(range.end > range.begin);
return len;
}
extent_types_t get_type() const override {
ceph_abort("should never happen");
return extent_types_t::ROOT;
}
val_t get_val() const final {
return value;
}
key_t get_key() const final {
return range.begin;
}
PhysicalNodeMappingRef<key_t, val_t> duplicate() const final {
auto ret = std::unique_ptr<BtreeNodeMapping<key_t, val_t>>(
new BtreeNodeMapping<key_t, val_t>(ctx));
ret->range = range;
ret->value = value;
ret->parent = parent;
ret->len = len;
ret->pos = pos;
return ret;
}
bool has_been_invalidated() const final {
return parent->has_been_invalidated();
}
get_child_ret_t<LogicalCachedExtent> get_logical_extent(Transaction&) final;
};
}
| 5,120 | 23.270142 | 94 | h |
null | ceph-main/src/crimson/os/seastore/collection_manager/collection_flat_node.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/collection_manager.h"
namespace crimson::os::seastore::collection_manager {
struct coll_context_t {
TransactionManager &tm;
Transaction &t;
};
using base_coll_map_t = std::map<denc_coll_t, uint32_t>;
struct coll_map_t : base_coll_map_t {
auto insert(coll_t coll, unsigned bits) {
return emplace(
std::make_pair(denc_coll_t{coll}, bits)
);
}
void update(coll_t coll, unsigned bits) {
(*this)[denc_coll_t{coll}] = bits;
}
void remove(coll_t coll) {
erase(denc_coll_t{coll});
}
};
struct delta_t {
enum class op_t : uint_fast8_t {
INSERT,
UPDATE,
REMOVE,
INVALID
} op = op_t::INVALID;
denc_coll_t coll;
uint32_t bits = 0;
DENC(delta_t, v, p) {
DENC_START(1, 1, p);
denc(v.op, p);
denc(v.coll, p);
denc(v.bits, p);
DENC_FINISH(p);
}
void replay(coll_map_t &l) const;
};
}
WRITE_CLASS_DENC(crimson::os::seastore::collection_manager::delta_t)
namespace crimson::os::seastore::collection_manager {
class delta_buffer_t {
std::vector<delta_t> buffer;
public:
bool empty() const {
return buffer.empty();
}
void insert(coll_t coll, uint32_t bits) {
buffer.push_back(delta_t{delta_t::op_t::INSERT, denc_coll_t(coll), bits});
}
void update(coll_t coll, uint32_t bits) {
buffer.push_back(delta_t{delta_t::op_t::UPDATE, denc_coll_t(coll), bits});
}
void remove(coll_t coll) {
buffer.push_back(delta_t{delta_t::op_t::REMOVE, denc_coll_t(coll), 0});
}
void replay(coll_map_t &l) {
for (auto &i: buffer) {
i.replay(l);
}
}
void clear() { buffer.clear(); }
DENC(delta_buffer_t, v, p) {
DENC_START(1, 1, p);
denc(v.buffer, p);
DENC_FINISH(p);
}
};
}
WRITE_CLASS_DENC(crimson::os::seastore::collection_manager::delta_buffer_t)
namespace crimson::os::seastore::collection_manager {
struct CollectionNode
: LogicalCachedExtent {
using CollectionNodeRef = TCachedExtentRef<CollectionNode>;
bool loaded = false;
template <typename... T>
CollectionNode(T&&... t)
: LogicalCachedExtent(std::forward<T>(t)...) {}
static constexpr extent_types_t type = extent_types_t::COLL_BLOCK;
coll_map_t decoded;
delta_buffer_t delta_buffer;
CachedExtentRef duplicate_for_write(Transaction&) final {
assert(delta_buffer.empty());
return CachedExtentRef(new CollectionNode(*this));
}
delta_buffer_t *maybe_get_delta_buffer() {
return is_mutation_pending() ? &delta_buffer : nullptr;
}
using list_iertr = CollectionManager::list_iertr;
using list_ret = CollectionManager::list_ret;
list_ret list();
enum class create_result_t : uint8_t {
SUCCESS,
OVERFLOW
};
using create_iertr = CollectionManager::create_iertr;
using create_ret = create_iertr::future<create_result_t>;
create_ret create(coll_context_t cc, coll_t coll, unsigned bits);
using remove_iertr = CollectionManager::remove_iertr;
using remove_ret = CollectionManager::remove_ret;
remove_ret remove(coll_context_t cc, coll_t coll);
using update_iertr = CollectionManager::update_iertr;
using update_ret = CollectionManager::update_ret;
update_ret update(coll_context_t cc, coll_t coll, unsigned bits);
void read_to_local() {
if (loaded) return;
bufferlist bl;
bl.append(get_bptr());
auto iter = bl.cbegin();
decode((base_coll_map_t&)decoded, iter);
loaded = true;
}
void copy_to_node() {
bufferlist bl;
encode((base_coll_map_t&)decoded, bl);
auto iter = bl.begin();
auto size = encoded_sizeof((base_coll_map_t&)decoded);
assert(size <= get_bptr().length());
get_bptr().zero();
iter.copy(size, get_bptr().c_str());
}
ceph::bufferlist get_delta() final {
assert(!delta_buffer.empty());
ceph::bufferlist bl;
encode(delta_buffer, bl);
delta_buffer.clear();
return bl;
}
void apply_delta(const ceph::bufferlist &bl) final {
assert(bl.length());
delta_buffer_t buffer;
auto bptr = bl.begin();
decode(buffer, bptr);
buffer.replay(decoded);
copy_to_node();
}
static constexpr extent_types_t TYPE = extent_types_t::COLL_BLOCK;
extent_types_t get_type() const final {
return TYPE;
}
std::ostream &print_detail_l(std::ostream &out) const final;
};
using CollectionNodeRef = CollectionNode::CollectionNodeRef;
}
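/*
 * Design note with a usage sketch (illustration only; node, coll and bits are
 * stand-ins): mutations are applied to the in-memory decoded map and, when
 * the extent is mutation-pending, also recorded in delta_buffer. At commit
 * the encoded buffer becomes the journal delta; on replay apply_delta()
 * decodes it, re-applies the operations to decoded and refreshes the on-disk
 * layout via copy_to_node(). A mutation path looks roughly like:
 *
 *   node->read_to_local();
 *   if (auto *buf = node->maybe_get_delta_buffer(); buf) {
 *     buf->insert(coll, bits);
 *   }
 *   node->decoded.insert(coll, bits);
 *   node->copy_to_node();
 */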
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::collection_manager::CollectionNode> : fmt::ostream_formatter {};
#endif
| 4,706 | 24.171123 | 121 | h |
null | ceph-main/src/crimson/os/seastore/collection_manager/flat_collection_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "include/ceph_assert.h"
#include "crimson/os/seastore/collection_manager.h"
#include "crimson/os/seastore/collection_manager/collection_flat_node.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/transaction_manager.h"
namespace crimson::os::seastore::collection_manager {
class FlatCollectionManager : public CollectionManager {
TransactionManager &tm;
coll_context_t get_coll_context(Transaction &t) {
return coll_context_t{tm, t};
}
using get_root_iertr = base_iertr;
using get_root_ret = get_root_iertr::future<CollectionNodeRef>;
get_root_ret get_coll_root(const coll_root_t &coll_root, Transaction &t);
public:
explicit FlatCollectionManager(TransactionManager &tm);
mkfs_ret mkfs(Transaction &t) final;
create_ret create(coll_root_t &coll_root, Transaction &t, coll_t cid,
coll_info_t info) final;
list_ret list(const coll_root_t &coll_root, Transaction &t) final;
remove_ret remove(const coll_root_t &coll_root, Transaction &t, coll_t cid) final;
update_ret update(const coll_root_t &coll_root, Transaction &t, coll_t cid, coll_info_t info) final;
};
using FlatCollectionManagerRef = std::unique_ptr<FlatCollectionManager>;
}
| 1,346 | 31.071429 | 102 | h |
null | ceph-main/src/crimson/os/seastore/journal/circular_bounded_journal.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/common/log.h"
#include <boost/intrusive_ptr.hpp>
#include <seastar/core/future.hh>
#include "include/ceph_assert.h"
#include "include/buffer.h"
#include "include/denc.h"
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/journal.h"
#include "include/uuid.h"
#include "crimson/os/seastore/random_block_manager.h"
#include "crimson/os/seastore/random_block_manager/rbm_device.h"
#include <list>
#include "crimson/os/seastore/journal/record_submitter.h"
#include "crimson/os/seastore/journal/circular_journal_space.h"
namespace crimson::os::seastore::journal {
using RBMDevice = random_block_device::RBMDevice;
/**
* CircularBoundedJournal
*
*
 * CircularBoundedJournal (CBJournal) is a journal that works like a circular
 * queue. With CBJournal, Seastore appends a record to the journal if the
 * record is small (most likely metadata), at which point the head
 * (written_to) is advanced. Eventually, Seastore applies the records in
 * CBJournal to RBM (TODO).
 *
 * - Commit time
 * After submit_record is done, the in-memory written_to value is advanced;
 * written_to marks where the next record will be appended. Note that
 * applied_to is not changed here.
 *
 * - Replay time
 * At replay time, CBJournal replays the records in CBJournal by reading them
 * starting from dirty_tail. It then checks the records for validity one by
 * one, and written_to is recovered from the last valid record found. Note
 * that applied_to is stored permanently once the apply work, i.e. applying
 * the records in CBJournal to RBM, is done by CBJournal (TODO).
*
* TODO: apply records from CircularBoundedJournal to RandomBlockManager
*
*/
constexpr uint64_t DEFAULT_BLOCK_SIZE = 4096;
class CircularBoundedJournal : public Journal {
public:
CircularBoundedJournal(
JournalTrimmer &trimmer, RBMDevice* device, const std::string &path);
~CircularBoundedJournal() {}
JournalTrimmer &get_trimmer() final {
return trimmer;
}
open_for_mkfs_ret open_for_mkfs() final;
open_for_mount_ret open_for_mount() final;
close_ertr::future<> close() final;
journal_type_t get_type() final {
return journal_type_t::RANDOM_BLOCK;
}
submit_record_ret submit_record(
record_t &&record,
OrderingHandle &handle
) final;
seastar::future<> flush(
OrderingHandle &handle
) final {
// TODO
return seastar::now();
}
replay_ret replay(delta_handler_t &&delta_handler) final;
rbm_abs_addr get_rbm_addr(journal_seq_t seq) const {
return convert_paddr_to_abs_addr(seq.offset);
}
/**
*
* CircularBoundedJournal write
*
   * NVMe devices can support large block writes (< 512KB) with the atomic
   * write unit command. With this command, we expect that most of the
   * incoming data can be stored with a single write call, which has lower
   * overhead than the existing approach that uses a combination of system
   * calls such as write() and sync().
*
*/
seastar::future<> update_journal_tail(
journal_seq_t dirty,
journal_seq_t alloc) {
return cjs.update_journal_tail(dirty, alloc);
}
journal_seq_t get_dirty_tail() const {
return cjs.get_dirty_tail();
}
journal_seq_t get_alloc_tail() const {
return cjs.get_alloc_tail();
}
using read_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent,
crimson::ct_error::erange>;
using read_record_ertr = read_ertr;
using read_record_ret = read_record_ertr::future<
std::optional<std::pair<record_group_header_t, bufferlist>>
>;
/*
* read_record
*
* read record from given address
*
* @param paddr_t to read
* @param expected_seq
*
*/
read_record_ret read_record(paddr_t offset, segment_seq_t expected_seq);
read_record_ret return_record(record_group_header_t& header, bufferlist bl);
void set_write_pipeline(WritePipeline *_write_pipeline) final {
write_pipeline = _write_pipeline;
}
device_id_t get_device_id() const {
return cjs.get_device_id();
}
extent_len_t get_block_size() const {
return cjs.get_block_size();
}
rbm_abs_addr get_journal_end() const {
return cjs.get_journal_end();
}
void set_written_to(journal_seq_t seq) {
cjs.set_written_to(seq);
}
journal_seq_t get_written_to() {
return cjs.get_written_to();
}
rbm_abs_addr get_records_start() const {
return cjs.get_records_start();
}
seastar::future<> finish_commit(transaction_type_t type) final;
using cbj_delta_handler_t = std::function<
replay_ertr::future<bool>(
const record_locator_t&,
const delta_info_t&,
sea_time_point modify_time)>;
Journal::replay_ret scan_valid_record_delta(
cbj_delta_handler_t &&delta_handler,
journal_seq_t tail);
submit_record_ret do_submit_record(record_t &&record, OrderingHandle &handle);
// Test interfaces
CircularJournalSpace& get_cjs() {
return cjs;
}
private:
JournalTrimmer &trimmer;
std::string path;
WritePipeline *write_pipeline = nullptr;
/**
* initialized
*
   * Set to true after open_device_read_header, and set back to false in close().
   * Indicates that the device is open and the in-memory header is valid.
*/
bool initialized = false;
  // written_to (maintained in cjs below) is the start address where the next
  // record will be written; it should be in the range
  // [get_records_start(), get_journal_end()).
  // written_to.segment_seq is a circulation seq used to track the sequence of
  // written records.
CircularJournalSpace cjs;
RecordSubmitter record_submitter;
};
}
| 5,697 | 26.133333 | 84 | h |
null | ceph-main/src/crimson/os/seastore/journal/circular_journal_space.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <optional>
#include <seastar/core/circular_buffer.hh>
#include <seastar/core/metrics.hh>
#include <seastar/core/shared_future.hh>
#include "include/buffer.h"
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/journal.h"
#include "crimson/os/seastore/random_block_manager.h"
#include "crimson/os/seastore/random_block_manager/rbm_device.h"
#include "crimson/os/seastore/journal/record_submitter.h"
#include "crimson/os/seastore/async_cleaner.h"
namespace crimson::os::seastore {
class SegmentProvider;
class JournalTrimmer;
}
namespace crimson::os::seastore::journal {
class CircularBoundedJournal;
class CircularJournalSpace : public JournalAllocator {
public:
const std::string& get_name() const final {
return print_name;
}
extent_len_t get_block_size() const final;
bool can_write() const final {
return (device != nullptr);
}
segment_nonce_t get_nonce() const final {
return 0;
}
bool needs_roll(std::size_t length) const final;
roll_ertr::future<> roll() final;
write_ret write(ceph::bufferlist&& to_write) final;
void update_modify_time(record_t& record) final {}
close_ertr::future<> close() final {
return write_header(
).safe_then([this]() -> close_ertr::future<> {
initialized = false;
return close_ertr::now();
}).handle_error(
Journal::open_for_mount_ertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error write_header"
}
);
}
open_ret open(bool is_mkfs) final;
public:
CircularJournalSpace(RBMDevice * device);
struct cbj_header_t;
using write_ertr = Journal::submit_record_ertr;
/*
* device_write_bl
*
* @param device address to write
* @param bufferlist to write
*
*/
write_ertr::future<> device_write_bl(rbm_abs_addr offset, ceph::bufferlist &bl);
using read_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent,
crimson::ct_error::erange>;
using read_header_ertr = read_ertr;
using read_header_ret = read_header_ertr::future<
std::optional<std::pair<cbj_header_t, bufferlist>>
>;
/*
* read_header
*
   * read the journal header block from the first block of the journal
   *
*/
read_header_ret read_header();
ceph::bufferlist encode_header();
write_ertr::future<> write_header();
/**
* CircularBoundedJournal structure
*
* +-------------------------------------------------------+
* | header | record | record | record | record | ... |
* +-------------------------------------------------------+
* ^-----------block aligned-----------------^
* <----fixed---->
*/
struct cbj_header_t {
// start offset of CircularBoundedJournal in the device
journal_seq_t dirty_tail;
journal_seq_t alloc_tail;
DENC(cbj_header_t, v, p) {
DENC_START(1, 1, p);
denc(v.dirty_tail, p);
denc(v.alloc_tail, p);
DENC_FINISH(p);
}
};
/**
*
* Write position for CircularBoundedJournal
*
* | written to rbm | written length to CircularBoundedJournal | new write |
* ----------------->------------------------------------------------>
* ^ ^
* applied_to written_to
*
*/
journal_seq_t get_written_to() const {
return written_to;
}
rbm_abs_addr get_rbm_addr(journal_seq_t seq) const {
return convert_paddr_to_abs_addr(seq.offset);
}
void set_written_to(journal_seq_t seq) {
rbm_abs_addr addr = convert_paddr_to_abs_addr(seq.offset);
assert(addr >= get_records_start());
assert(addr < get_journal_end());
written_to = seq;
}
device_id_t get_device_id() const {
return device->get_device_id();
}
journal_seq_t get_dirty_tail() const {
return header.dirty_tail;
}
journal_seq_t get_alloc_tail() const {
return header.alloc_tail;
}
/*
Size-related interfaces
+---------------------------------------------------------+
| header | record | record | record | record | ... |
+---------------------------------------------------------+
^ ^ ^
| | |
get_journal_start | get_journal_end
get_records_start
<-- get_records_total_size + block_size -->
<--------------- get_journal_size ------------------------>
*/
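  /*
   * Worked example for the helpers below (hypothetical numbers; assumes a
   * 4 KiB block size and a 16 MiB journal starting at offset 0):
   *   get_records_start()      = 0 + 4096
   *   get_records_total_size() = 16 MiB - 2 * 4096
   *   get_journal_end()        = 16 MiB
   * When written_to has wrapped around behind dirty_tail, the used size is
   * written_to + total + block_size - tail, i.e. the distance from tail to
   * the journal end plus the distance from the records start to written_to.
   */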
size_t get_records_used_size() const {
auto rbm_written_to = get_rbm_addr(get_written_to());
auto rbm_tail = get_rbm_addr(get_dirty_tail());
return rbm_written_to >= rbm_tail ?
rbm_written_to - rbm_tail :
rbm_written_to + get_records_total_size() + get_block_size()
- rbm_tail;
}
size_t get_records_total_size() const {
assert(device);
    // one block is for the header and one block is reserved to denote the end
return device->get_journal_size() - (2 * get_block_size());
}
rbm_abs_addr get_records_start() const {
assert(device);
return device->get_shard_journal_start() + get_block_size();
}
size_t get_records_available_size() const {
return get_records_total_size() - get_records_used_size();
}
bool is_available_size(uint64_t size) {
auto rbm_written_to = get_rbm_addr(get_written_to());
auto rbm_tail = get_rbm_addr(get_dirty_tail());
if (rbm_written_to > rbm_tail &&
(get_journal_end() - rbm_written_to) < size &&
size > (get_records_used_size() -
(get_journal_end() - rbm_written_to))) {
return false;
}
return get_records_available_size() >= size;
}
rbm_abs_addr get_journal_end() const {
assert(device);
return device->get_shard_journal_start() + device->get_journal_size();
}
read_ertr::future<> read(
uint64_t offset,
bufferptr &bptr) {
assert(device);
return device->read(offset, bptr);
}
seastar::future<> update_journal_tail(
journal_seq_t dirty,
journal_seq_t alloc) {
header.dirty_tail = dirty;
header.alloc_tail = alloc;
return write_header(
).handle_error(
crimson::ct_error::assert_all{
"encountered invalid error in update_journal_tail"
});
}
void set_initialized(bool init) {
initialized = init;
}
void set_cbj_header(cbj_header_t& head) {
header = head;
}
cbj_header_t get_cbj_header() {
return header;
}
private:
std::string print_name;
cbj_header_t header;
RBMDevice* device;
journal_seq_t written_to;
bool initialized = false;
};
std::ostream &operator<<(std::ostream &out, const CircularJournalSpace::cbj_header_t &header);
}
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::journal::CircularJournalSpace::cbj_header_t)
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::journal::CircularJournalSpace::cbj_header_t> : fmt::ostream_formatter {};
#endif
| 7,178 | 26.611538 | 130 | h |
null | ceph-main/src/crimson/os/seastore/journal/record_submitter.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <optional>
#include <seastar/core/circular_buffer.hh>
#include <seastar/core/metrics.hh>
#include <seastar/core/shared_future.hh>
#include "include/buffer.h"
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/segment_manager_group.h"
#include "crimson/os/seastore/segment_seq_allocator.h"
namespace crimson::os::seastore {
class SegmentProvider;
class JournalTrimmer;
}
namespace crimson::os::seastore::journal {
class JournalAllocator {
public:
using base_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
virtual const std::string& get_name() const = 0;
virtual void update_modify_time(record_t& record) = 0;
virtual extent_len_t get_block_size() const = 0;
using close_ertr = base_ertr;
virtual close_ertr::future<> close() = 0;
virtual segment_nonce_t get_nonce() const = 0;
using write_ertr = base_ertr;
using write_ret = write_ertr::future<write_result_t>;
virtual write_ret write(ceph::bufferlist&& to_write) = 0;
virtual bool can_write() const = 0;
using roll_ertr = base_ertr;
virtual roll_ertr::future<> roll() = 0;
virtual bool needs_roll(std::size_t length) const = 0;
using open_ertr = base_ertr;
using open_ret = open_ertr::future<journal_seq_t>;
virtual open_ret open(bool is_mkfs) = 0;
};
/**
* RecordBatch
*
* Maintain a batch of records for submit.
*/
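/*
 * Illustrative batch lifecycle (a sketch only; see the member declarations
 * below for the exact signatures and error handling):
 *
 *   batch.initialize(i, batch_capacity, batch_flush_size);
 *   batch.add_pending(name, std::move(record), block_size);   // repeated
 *   auto [bl, group_size] = batch.encode_batch(committed_to, nonce);
 *   ... write bl through the JournalAllocator ...
 *   batch.set_result(write_result);
 */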
class RecordBatch {
enum class state_t {
EMPTY = 0,
PENDING,
SUBMITTING
};
public:
RecordBatch() = default;
RecordBatch(RecordBatch&&) = delete;
RecordBatch(const RecordBatch&) = delete;
RecordBatch& operator=(RecordBatch&&) = delete;
RecordBatch& operator=(const RecordBatch&) = delete;
bool is_empty() const {
return state == state_t::EMPTY;
}
bool is_pending() const {
return state == state_t::PENDING;
}
bool is_submitting() const {
return state == state_t::SUBMITTING;
}
std::size_t get_index() const {
return index;
}
std::size_t get_num_records() const {
return pending.get_size();
}
std::size_t get_batch_capacity() const {
return batch_capacity;
}
const record_group_size_t& get_submit_size() const {
assert(state != state_t::EMPTY);
return pending.size;
}
bool needs_flush() const {
assert(state != state_t::SUBMITTING);
assert(pending.get_size() <= batch_capacity);
if (state == state_t::EMPTY) {
return false;
} else {
assert(state == state_t::PENDING);
return (pending.get_size() >= batch_capacity ||
pending.size.get_encoded_length() > batch_flush_size);
}
}
struct evaluation_t {
record_group_size_t submit_size;
bool is_full;
};
evaluation_t evaluate_submit(
const record_size_t& rsize,
extent_len_t block_size) const {
assert(!needs_flush());
auto submit_size = pending.size.get_encoded_length_after(
rsize, block_size);
bool is_full = submit_size.get_encoded_length() > batch_flush_size;
return {submit_size, is_full};
}
void initialize(std::size_t i,
std::size_t _batch_capacity,
std::size_t _batch_flush_size) {
ceph_assert(_batch_capacity > 0);
index = i;
batch_capacity = _batch_capacity;
batch_flush_size = _batch_flush_size;
pending.reserve(batch_capacity);
}
  // Add to the batch; the returned future will be resolved after the batch is
  // written.
//
// Set write_result_t::write_length to 0 if the record is not the first one
// in the batch.
using add_pending_ertr = JournalAllocator::write_ertr;
using add_pending_ret = add_pending_ertr::future<record_locator_t>;
add_pending_ret add_pending(
const std::string& name,
record_t&&,
extent_len_t block_size);
// Encode the batched records for write.
std::pair<ceph::bufferlist, record_group_size_t> encode_batch(
const journal_seq_t& committed_to,
segment_nonce_t segment_nonce);
// Set the write result and reset for reuse
using maybe_result_t = std::optional<write_result_t>;
void set_result(maybe_result_t maybe_write_end_seq);
  // The fast path that is equivalent to submitting a single record as a batch.
//
// Essentially, equivalent to the combined logic of:
// add_pending(), encode_batch() and set_result() above without
// the intervention of the shared io_promise.
//
// Note the current RecordBatch can be reused afterwards.
std::pair<ceph::bufferlist, record_group_size_t> submit_pending_fast(
record_t&&,
extent_len_t block_size,
const journal_seq_t& committed_to,
segment_nonce_t segment_nonce);
private:
record_group_size_t get_encoded_length_after(
const record_t& record,
extent_len_t block_size) const {
return pending.size.get_encoded_length_after(
record.size, block_size);
}
state_t state = state_t::EMPTY;
std::size_t index = 0;
std::size_t batch_capacity = 0;
std::size_t batch_flush_size = 0;
record_group_t pending;
std::size_t submitting_size = 0;
extent_len_t submitting_length = 0;
extent_len_t submitting_mdlength = 0;
struct promise_result_t {
write_result_t write_result;
extent_len_t mdlength;
};
using maybe_promise_result_t = std::optional<promise_result_t>;
std::optional<seastar::shared_promise<maybe_promise_result_t> > io_promise;
};
/**
* RecordSubmitter
*
 * Submit records concurrently, batching them with RecordBatch and writing
 * them out through the JournalAllocator (e.g. SegmentAllocator).
*
* Configurations and controls:
* - io_depth: the io-depth limit to SegmentAllocator;
* - batch_capacity: the number limit of records in a RecordBatch;
* - batch_flush_size: the bytes threshold to force flush a RecordBatch to
* control the maximum latency;
* - preferred_fullness: the fullness threshold to flush a RecordBatch;
*/
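/*
 * Illustrative construction sketch (the numeric values are hypothetical and
 * are not defaults taken from this codebase):
 *
 *   RecordSubmitter submitter(
 *     2,         // io_depth: at most 2 journal writes in flight
 *     4,         // batch_capacity: group up to 4 records per write
 *     1 << 20,   // batch_flush_size: force a flush beyond ~1 MiB batched
 *     0.5,       // preferred_fullness: flush once the batch is half full
 *     journal_allocator);
 *
 * Each submit() then either joins the current RecordBatch or triggers a
 * flush and/or roll_segment(), depending on check_action() for the record.
 */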
class RecordSubmitter {
enum class state_t {
IDLE = 0, // outstanding_io == 0
PENDING, // outstanding_io < io_depth_limit
FULL // outstanding_io == io_depth_limit
// OVERFLOW: outstanding_io > io_depth_limit is impossible
};
struct grouped_io_stats {
uint64_t num_io = 0;
uint64_t num_io_grouped = 0;
void increment(uint64_t num_grouped_io) {
++num_io;
num_io_grouped += num_grouped_io;
}
};
using base_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
public:
RecordSubmitter(std::size_t io_depth,
std::size_t batch_capacity,
std::size_t batch_flush_size,
double preferred_fullness,
JournalAllocator&);
const std::string& get_name() const {
return journal_allocator.get_name();
}
journal_seq_t get_committed_to() const {
return committed_to;
}
  // whether it is currently possible to submit a record
bool is_available() const;
  // wait for availability if submission is not currently possible;
  // is_available() should be checked again when the future is resolved.
using wa_ertr = base_ertr;
wa_ertr::future<> wait_available();
// when available, check for the submit action
// according to the pending record size
enum class action_t {
ROLL,
SUBMIT_FULL,
SUBMIT_NOT_FULL
};
action_t check_action(const record_size_t&) const;
// when available, roll the segment if needed
using roll_segment_ertr = base_ertr;
roll_segment_ertr::future<> roll_segment();
// when available, submit the record if possible
using submit_ertr = base_ertr;
using submit_ret = submit_ertr::future<record_locator_t>;
submit_ret submit(record_t&&, bool with_atomic_roll_segment=false);
void update_committed_to(const journal_seq_t& new_committed_to) {
assert(new_committed_to != JOURNAL_SEQ_NULL);
assert(committed_to == JOURNAL_SEQ_NULL ||
committed_to <= new_committed_to);
committed_to = new_committed_to;
}
// open for write, generate the correct print name, and register metrics
using open_ertr = base_ertr;
using open_ret = open_ertr::future<journal_seq_t>;
open_ret open(bool is_mkfs);
using close_ertr = base_ertr;
close_ertr::future<> close();
private:
void update_state();
void increment_io() {
++num_outstanding_io;
stats.io_depth_stats.increment(num_outstanding_io);
update_state();
}
void decrement_io_with_flush();
void pop_free_batch() {
assert(p_current_batch == nullptr);
assert(!free_batch_ptrs.empty());
p_current_batch = free_batch_ptrs.front();
assert(p_current_batch->is_empty());
assert(p_current_batch == &batches[p_current_batch->get_index()]);
free_batch_ptrs.pop_front();
}
void account_submission(std::size_t, const record_group_size_t&);
using maybe_result_t = RecordBatch::maybe_result_t;
void finish_submit_batch(RecordBatch*, maybe_result_t);
void flush_current_batch();
state_t state = state_t::IDLE;
std::size_t num_outstanding_io = 0;
std::size_t io_depth_limit;
double preferred_fullness;
JournalAllocator& journal_allocator;
// committed_to may be in a previous journal segment
journal_seq_t committed_to = JOURNAL_SEQ_NULL;
std::unique_ptr<RecordBatch[]> batches;
// should not be nullptr after constructed
RecordBatch* p_current_batch = nullptr;
seastar::circular_buffer<RecordBatch*> free_batch_ptrs;
// blocked for rolling or lack of resource
std::optional<seastar::shared_promise<> > wait_available_promise;
bool has_io_error = false;
  // when a flush is needed but the io depth is full,
  // wait for decrement_io_with_flush()
std::optional<seastar::promise<> > wait_unfull_flush_promise;
struct {
grouped_io_stats record_batch_stats;
grouped_io_stats io_depth_stats;
uint64_t record_group_padding_bytes = 0;
uint64_t record_group_metadata_bytes = 0;
uint64_t record_group_data_bytes = 0;
} stats;
seastar::metrics::metric_group metrics;
};
}
| 9,903 | 27.45977 | 77 | h |
null | ceph-main/src/crimson/os/seastore/journal/segment_allocator.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <optional>
#include <seastar/core/circular_buffer.hh>
#include <seastar/core/metrics.hh>
#include <seastar/core/shared_future.hh>
#include "include/buffer.h"
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/segment_manager_group.h"
#include "crimson/os/seastore/segment_seq_allocator.h"
#include "crimson/os/seastore/journal/record_submitter.h"
#include "crimson/os/seastore/async_cleaner.h"
namespace crimson::os::seastore {
class SegmentProvider;
class JournalTrimmer;
}
namespace crimson::os::seastore::journal {
/**
* SegmentAllocator
*
* Maintain an available segment for writes.
*/
class SegmentAllocator : public JournalAllocator {
public:
// SegmentAllocator specific methods
SegmentAllocator(JournalTrimmer *trimmer,
data_category_t category,
rewrite_gen_t gen,
SegmentProvider &sp,
SegmentSeqAllocator &ssa);
segment_id_t get_segment_id() const {
assert(can_write());
return current_segment->get_segment_id();
}
extent_len_t get_max_write_length() const {
return sm_group.get_segment_size() -
sm_group.get_rounded_header_length() -
sm_group.get_rounded_tail_length();
}
public:
// overriding methods
const std::string& get_name() const final {
return print_name;
}
extent_len_t get_block_size() const final {
return sm_group.get_block_size();
}
bool can_write() const final {
return !!current_segment;
}
segment_nonce_t get_nonce() const final {
assert(can_write());
return current_segment_nonce;
}
// returns true iff the current segment has insufficient space
bool needs_roll(std::size_t length) const final {
assert(can_write());
assert(current_segment->get_write_capacity() ==
sm_group.get_segment_size());
auto write_capacity = current_segment->get_write_capacity() -
sm_group.get_rounded_tail_length();
return length + written_to > std::size_t(write_capacity);
}
// open for write and generate the correct print name
open_ret open(bool is_mkfs) final;
// close the current segment and initialize next one
roll_ertr::future<> roll() final;
// write the buffer, return the write result
//
// May be called concurrently, but writes may complete in any order.
// If rolling/opening, no write is allowed.
write_ret write(ceph::bufferlist&& to_write) final;
using close_ertr = base_ertr;
close_ertr::future<> close() final;
void update_modify_time(record_t& record) final {
segment_provider.update_modify_time(
get_segment_id(),
record.modify_time,
record.extents.size());
}
private:
open_ret do_open(bool is_mkfs);
void reset() {
current_segment.reset();
written_to = 0;
current_segment_nonce = 0;
}
using close_segment_ertr = base_ertr;
close_segment_ertr::future<> close_segment();
// device id is not available during construction,
// so generate the print_name later.
std::string print_name;
const segment_type_t type; // JOURNAL or OOL
const data_category_t category;
const rewrite_gen_t gen;
SegmentProvider &segment_provider;
SegmentManagerGroup &sm_group;
SegmentRef current_segment;
segment_off_t written_to;
SegmentSeqAllocator &segment_seq_allocator;
segment_nonce_t current_segment_nonce;
JournalTrimmer *trimmer;
};
}
| 3,545 | 25.863636 | 72 | h |
null | ceph-main/src/crimson/os/seastore/journal/segmented_journal.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/future.hh>
#include "include/ceph_assert.h"
#include "include/buffer.h"
#include "include/denc.h"
#include "crimson/os/seastore/async_cleaner.h"
#include "crimson/os/seastore/journal.h"
#include "crimson/os/seastore/segment_manager_group.h"
#include "crimson/os/seastore/ordering_handle.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/osd/exceptions.h"
#include "segment_allocator.h"
#include "crimson/os/seastore/segment_seq_allocator.h"
#include "record_submitter.h"
namespace crimson::os::seastore::journal {
/**
 * Manages a stream of atomically written records to a SegmentManager.
*/
class SegmentedJournal : public Journal {
public:
SegmentedJournal(
SegmentProvider &segment_provider,
JournalTrimmer &trimmer);
~SegmentedJournal() {}
JournalTrimmer &get_trimmer() final {
return trimmer;
}
open_for_mkfs_ret open_for_mkfs() final;
open_for_mount_ret open_for_mount() final;
close_ertr::future<> close() final;
submit_record_ret submit_record(
record_t &&record,
OrderingHandle &handle) final;
seastar::future<> flush(OrderingHandle &handle) final;
replay_ret replay(delta_handler_t &&delta_handler) final;
void set_write_pipeline(WritePipeline *_write_pipeline) final {
write_pipeline = _write_pipeline;
}
journal_type_t get_type() final {
return journal_type_t::SEGMENTED;
}
seastar::future<> finish_commit(transaction_type_t type) {
return seastar::now();
}
private:
submit_record_ret do_submit_record(
record_t &&record,
OrderingHandle &handle
);
SegmentSeqAllocatorRef segment_seq_allocator;
SegmentAllocator journal_segment_allocator;
RecordSubmitter record_submitter;
SegmentManagerGroup &sm_group;
JournalTrimmer &trimmer;
WritePipeline* write_pipeline = nullptr;
/// return ordered vector of segments to replay
using replay_segments_t = std::vector<
std::pair<journal_seq_t, segment_header_t>>;
using prep_replay_segments_fut = replay_ertr::future<
replay_segments_t>;
prep_replay_segments_fut prep_replay_segments(
std::vector<std::pair<segment_id_t, segment_header_t>> segments);
/// scan the last segment for tail deltas
using scan_last_segment_ertr = replay_ertr;
scan_last_segment_ertr::future<> scan_last_segment(
const segment_id_t&, const segment_header_t&);
struct replay_stats_t {
std::size_t num_record_groups = 0;
std::size_t num_records = 0;
std::size_t num_alloc_deltas = 0;
std::size_t num_dirty_deltas = 0;
};
/// replays records starting at start through end of segment
replay_ertr::future<>
replay_segment(
journal_seq_t start, ///< [in] starting addr, seq
segment_header_t header, ///< [in] segment header
delta_handler_t &delta_handler, ///< [in] processes deltas in order
replay_stats_t &stats ///< [out] replay stats
);
};
}
| 3,038 | 27.669811 | 72 | h |
null | ceph-main/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "include/ceph_assert.h"
#include "include/buffer_fwd.h"
#include "include/interval_set.h"
#include "common/interval_map.h"
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/btree/fixed_kv_btree.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/lba_manager.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/lba_manager/btree/lba_btree_node.h"
#include "crimson/os/seastore/btree/btree_range_pin.h"
namespace crimson::os::seastore::lba_manager::btree {
class BtreeLBAMapping : public BtreeNodeMapping<laddr_t, paddr_t> {
public:
BtreeLBAMapping(op_context_t<laddr_t> ctx)
: BtreeNodeMapping(ctx) {}
BtreeLBAMapping(
op_context_t<laddr_t> c,
CachedExtentRef parent,
uint16_t pos,
lba_map_val_t &val,
lba_node_meta_t &&meta)
: BtreeNodeMapping(
c,
parent,
pos,
val.paddr,
val.len,
std::forward<lba_node_meta_t>(meta))
{}
};
using LBABtree = FixedKVBtree<
laddr_t, lba_map_val_t, LBAInternalNode,
LBALeafNode, BtreeLBAMapping, LBA_BLOCK_SIZE, true>;
/**
* BtreeLBAManager
*
* Uses a wandering btree to track two things:
* 1) lba state including laddr_t -> paddr_t mapping
* 2) reverse paddr_t -> laddr_t mapping for gc (TODO)
*
* Generally, any transaction will involve
* 1) deltas against lba tree nodes
* 2) new lba tree nodes
* - Note, there must necessarily be a delta linking
* these new nodes into the tree -- might be a
* bootstrap_state_t delta if new root
*
* get_mappings, alloc_extent_*, etc populate a Transaction
* which then gets submitted
*/
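/*
 * Illustrative transaction flow (a sketch only; the interruptible-future
 * plumbing, error handling and the final submission through the transaction
 * manager are omitted):
 *
 *   lba_manager.alloc_extent(t, hint, len, paddr, extent);  // new mapping
 *   lba_manager.get_mappings(t, offset, length);            // lookup
 *   lba_manager.incref_extent(t, laddr);                    // refcount + 1
 *   lba_manager.decref_extent(t, laddr);                    // refcount - 1
 */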
class BtreeLBAManager : public LBAManager {
public:
BtreeLBAManager(Cache &cache)
: cache(cache)
{
register_metrics();
}
mkfs_ret mkfs(
Transaction &t) final;
get_mappings_ret get_mappings(
Transaction &t,
laddr_t offset, extent_len_t length) final;
get_mappings_ret get_mappings(
Transaction &t,
laddr_list_t &&list) final;
get_mapping_ret get_mapping(
Transaction &t,
laddr_t offset) final;
alloc_extent_ret alloc_extent(
Transaction &t,
laddr_t hint,
extent_len_t len,
paddr_t addr,
LogicalCachedExtent*) final;
ref_ret decref_extent(
Transaction &t,
laddr_t addr) final {
return update_refcount(t, addr, -1);
}
ref_ret incref_extent(
Transaction &t,
laddr_t addr) final {
return update_refcount(t, addr, 1);
}
/**
* init_cached_extent
*
   * Checks whether e is live (reachable from the lba tree) and drops or
   * initializes it accordingly.
   *
   * Returns whether e is live.
*/
init_cached_extent_ret init_cached_extent(
Transaction &t,
CachedExtentRef e) final;
check_child_trackers_ret check_child_trackers(Transaction &t) final;
scan_mappings_ret scan_mappings(
Transaction &t,
laddr_t begin,
laddr_t end,
scan_mappings_func_t &&f) final;
rewrite_extent_ret rewrite_extent(
Transaction &t,
CachedExtentRef extent) final;
update_mapping_ret update_mapping(
Transaction& t,
laddr_t laddr,
paddr_t prev_addr,
paddr_t paddr,
LogicalCachedExtent*) final;
get_physical_extent_if_live_ret get_physical_extent_if_live(
Transaction &t,
extent_types_t type,
paddr_t addr,
laddr_t laddr,
extent_len_t len) final;
private:
Cache &cache;
struct {
uint64_t num_alloc_extents = 0;
uint64_t num_alloc_extents_iter_nexts = 0;
} stats;
op_context_t<laddr_t> get_context(Transaction &t) {
return op_context_t<laddr_t>{cache, t};
}
seastar::metrics::metric_group metrics;
void register_metrics();
/**
* update_refcount
*
* Updates refcount, returns resulting refcount
*/
using update_refcount_ret = ref_ret;
update_refcount_ret update_refcount(
Transaction &t,
laddr_t addr,
int delta);
/**
* _update_mapping
*
* Updates mapping, removes if f returns nullopt
*/
using _update_mapping_iertr = ref_iertr;
using _update_mapping_ret = ref_iertr::future<lba_map_val_t>;
using update_func_t = std::function<
lba_map_val_t(const lba_map_val_t &v)
>;
_update_mapping_ret _update_mapping(
Transaction &t,
laddr_t addr,
update_func_t &&f,
LogicalCachedExtent*);
};
using BtreeLBAManagerRef = std::unique_ptr<BtreeLBAManager>;
}
| 4,585 | 22.639175 | 80 | h |
null | ceph-main/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <sys/mman.h>
#include <memory>
#include <string.h>
#include "include/buffer.h"
#include "crimson/common/fixed_kv_node_layout.h"
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/lba_manager.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/btree/btree_range_pin.h"
#include "crimson/os/seastore/btree/fixed_kv_btree.h"
#include "crimson/os/seastore/btree/fixed_kv_node.h"
namespace crimson::os::seastore::lba_manager::btree {
using base_iertr = LBAManager::base_iertr;
using LBANode = FixedKVNode<laddr_t>;
/**
* lba_map_val_t
*
* struct representing a single lba mapping
*/
struct lba_map_val_t {
extent_len_t len = 0; ///< length of mapping
paddr_t paddr; ///< physical addr of mapping
uint32_t refcount = 0; ///< refcount
uint32_t checksum = 0; ///< checksum of original block written at paddr (TODO)
lba_map_val_t() = default;
lba_map_val_t(
extent_len_t len,
paddr_t paddr,
uint32_t refcount,
uint32_t checksum)
: len(len), paddr(paddr), refcount(refcount), checksum(checksum) {}
bool operator==(const lba_map_val_t&) const = default;
};
std::ostream& operator<<(std::ostream& out, const lba_map_val_t&);
constexpr size_t LBA_BLOCK_SIZE = 4096;
using lba_node_meta_t = fixed_kv_node_meta_t<laddr_t>;
using lba_node_meta_le_t = fixed_kv_node_meta_le_t<laddr_le_t>;
/**
* LBAInternalNode
*
* Abstracts operations on and layout of internal nodes for the
* LBA Tree.
*
* Layout (4k):
* size : uint32_t[1] 4b
* (padding) : 4b
* meta : lba_node_meta_le_t[3] (1*24)b
* keys : laddr_t[255] (254*8)b
* values : paddr_t[255] (254*8)b
* = 4096
* TODO: make the above capacity calculation part of FixedKVNodeLayout
* TODO: the above alignment probably isn't portable without further work
*/
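/*
 * Illustrative capacity arithmetic for the layout above: the byte counts sum
 * to 4 + 4 + (1*24) + (254*8) + (254*8) = 4096, consistent with
 * INTERNAL_NODE_CAPACITY = 254 below.
 */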
constexpr size_t INTERNAL_NODE_CAPACITY = 254;
struct LBAInternalNode
: FixedKVInternalNode<
INTERNAL_NODE_CAPACITY,
laddr_t, laddr_le_t,
LBA_BLOCK_SIZE,
LBAInternalNode> {
using Ref = TCachedExtentRef<LBAInternalNode>;
using internal_iterator_t = const_iterator;
template <typename... T>
LBAInternalNode(T&&... t) :
FixedKVInternalNode(std::forward<T>(t)...) {}
static constexpr extent_types_t TYPE = extent_types_t::LADDR_INTERNAL;
extent_types_t get_type() const final {
return TYPE;
}
};
using LBAInternalNodeRef = LBAInternalNode::Ref;
/**
* LBALeafNode
*
* Abstracts operations on and layout of leaf nodes for the
* LBA Tree.
*
* Layout (4k):
* size : uint32_t[1] 4b
* (padding) : 4b
* meta : lba_node_meta_le_t[3] (1*24)b
* keys : laddr_t[170] (145*8)b
* values : lba_map_val_t[170] (145*20)b
* = 4092
*
* TODO: update FixedKVNodeLayout to handle the above calculation
* TODO: the above alignment probably isn't portable without further work
*/
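/*
 * Illustrative capacity arithmetic for the layout above: the byte counts sum
 * to 4 + 4 + (1*24) + (145*8) + (145*20) = 4092 <= 4096, consistent with
 * LEAF_NODE_CAPACITY = 145 below.
 */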
constexpr size_t LEAF_NODE_CAPACITY = 145;
/**
* lba_map_val_le_t
*
* On disk layout for lba_map_val_t.
*/
struct lba_map_val_le_t {
extent_len_le_t len = init_extent_len_le(0);
paddr_le_t paddr;
ceph_le32 refcount{0};
ceph_le32 checksum{0};
lba_map_val_le_t() = default;
lba_map_val_le_t(const lba_map_val_le_t &) = default;
explicit lba_map_val_le_t(const lba_map_val_t &val)
: len(init_extent_len_le(val.len)),
paddr(paddr_le_t(val.paddr)),
refcount(val.refcount),
checksum(val.checksum) {}
operator lba_map_val_t() const {
return lba_map_val_t{ len, paddr, refcount, checksum };
}
};
struct LBALeafNode
: FixedKVLeafNode<
LEAF_NODE_CAPACITY,
laddr_t, laddr_le_t,
lba_map_val_t, lba_map_val_le_t,
LBA_BLOCK_SIZE,
LBALeafNode,
true> {
using Ref = TCachedExtentRef<LBALeafNode>;
using parent_type_t = FixedKVLeafNode<
LEAF_NODE_CAPACITY,
laddr_t, laddr_le_t,
lba_map_val_t, lba_map_val_le_t,
LBA_BLOCK_SIZE,
LBALeafNode,
true>;
using internal_const_iterator_t =
typename parent_type_t::node_layout_t::const_iterator;
using internal_iterator_t =
typename parent_type_t::node_layout_t::iterator;
template <typename... T>
LBALeafNode(T&&... t) :
parent_type_t(std::forward<T>(t)...) {}
static constexpr extent_types_t TYPE = extent_types_t::LADDR_LEAF;
bool validate_stable_children() final {
LOG_PREFIX(LBALeafNode::validate_stable_children);
if (this->children.empty()) {
return false;
}
for (auto i : *this) {
auto child = (LogicalCachedExtent*)this->children[i.get_offset()];
if (is_valid_child_ptr(child) && child->get_laddr() != i.get_key()) {
SUBERROR(seastore_fixedkv_tree,
"stable child not valid: child {}, key {}",
*child,
i.get_key());
ceph_abort();
return false;
}
}
return true;
}
void update(
internal_const_iterator_t iter,
lba_map_val_t val,
LogicalCachedExtent* nextent) final {
LOG_PREFIX(LBALeafNode::update);
if (nextent) {
SUBTRACE(seastore_fixedkv_tree, "trans.{}, pos {}, {}",
this->pending_for_transaction,
iter.get_offset(),
*nextent);
// child-ptr may already be correct, see LBAManager::update_mappings()
this->update_child_ptr(iter, nextent);
}
val.paddr = this->maybe_generate_relative(val.paddr);
return this->journal_update(
iter,
val,
this->maybe_get_delta_buffer());
}
internal_const_iterator_t insert(
internal_const_iterator_t iter,
laddr_t addr,
lba_map_val_t val,
LogicalCachedExtent* nextent) final {
LOG_PREFIX(LBALeafNode::insert);
SUBTRACE(seastore_fixedkv_tree, "trans.{}, pos {}, key {}, extent {}",
this->pending_for_transaction,
iter.get_offset(),
addr,
(void*)nextent);
this->insert_child_ptr(iter, nextent);
val.paddr = this->maybe_generate_relative(val.paddr);
this->journal_insert(
iter,
addr,
val,
this->maybe_get_delta_buffer());
return iter;
}
void remove(internal_const_iterator_t iter) final {
LOG_PREFIX(LBALeafNode::remove);
SUBTRACE(seastore_fixedkv_tree, "trans.{}, pos {}, key {}",
this->pending_for_transaction,
iter.get_offset(),
iter.get_key());
assert(iter != this->end());
this->remove_child_ptr(iter);
return this->journal_remove(
iter,
this->maybe_get_delta_buffer());
}
// See LBAInternalNode, same concept
void resolve_relative_addrs(paddr_t base);
void node_resolve_vals(
internal_iterator_t from,
internal_iterator_t to) const final
{
if (this->is_initial_pending()) {
for (auto i = from; i != to; ++i) {
auto val = i->get_val();
if (val.paddr.is_relative()) {
assert(val.paddr.is_block_relative());
val.paddr = this->get_paddr().add_relative(val.paddr);
i->set_val(val);
}
}
}
}
void node_unresolve_vals(
internal_iterator_t from,
internal_iterator_t to) const final
{
if (this->is_initial_pending()) {
for (auto i = from; i != to; ++i) {
auto val = i->get_val();
if (val.paddr.is_relative()) {
auto val = i->get_val();
assert(val.paddr.is_record_relative());
val.paddr = val.paddr.block_relative_to(this->get_paddr());
i->set_val(val);
}
}
}
}
extent_types_t get_type() const final {
return TYPE;
}
std::ostream &_print_detail(std::ostream &out) const final;
};
using LBALeafNodeRef = TCachedExtentRef<LBALeafNode>;
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::lba_manager::btree::lba_node_meta_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::lba_manager::btree::lba_map_val_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::lba_manager::btree::LBAInternalNode> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::lba_manager::btree::LBALeafNode> : fmt::ostream_formatter {};
#endif
| 8,388 | 28.027682 | 122 | h |
null | ceph-main/src/crimson/os/seastore/omap_manager/btree/btree_omap_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "include/ceph_assert.h"
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/omap_manager.h"
#include "crimson/os/seastore/omap_manager/btree/omap_btree_node.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/transaction_manager.h"
namespace crimson::os::seastore::omap_manager {
/**
* BtreeOMapManager
*
 * Uses a btree to track:
 *   the string -> string mapping for each onode's omap
*/
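/*
 * Illustrative usage sketch (comments only; the interruptible-future plumbing
 * and error handling are omitted, and the omap_root_t is assumed to come from
 * initialize_omap() or from the owning onode):
 *
 *   omap_manager.omap_set_key(root, t, "key", value_bl);
 *   omap_manager.omap_get_value(root, t, "key");
 *   omap_manager.omap_list(root, t, std::nullopt, std::nullopt, config);
 *   omap_manager.omap_rm_key(root, t, "key");
 */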
class BtreeOMapManager : public OMapManager {
TransactionManager &tm;
omap_context_t get_omap_context(
Transaction &t, laddr_t addr_min) {
return omap_context_t{tm, t, addr_min};
}
/* get_omap_root
*
* load omap tree root node
*/
using get_root_iertr = base_iertr;
using get_root_ret = get_root_iertr::future<OMapNodeRef>;
static get_root_ret get_omap_root(
omap_context_t c,
const omap_root_t &omap_root);
/* handle_root_split
*
   * the root has been split, so omap_root_t needs to be updated
*/
using handle_root_split_iertr = base_iertr;
using handle_root_split_ret = handle_root_split_iertr::future<>;
handle_root_split_ret handle_root_split(
omap_context_t c,
omap_root_t &omap_root,
const OMapNode::mutation_result_t& mresult);
/* handle_root_merge
*
   * the root node has only one item and is not a leaf node, so a layer needs to be removed
*/
using handle_root_merge_iertr = base_iertr;
using handle_root_merge_ret = handle_root_merge_iertr::future<>;
handle_root_merge_ret handle_root_merge(
omap_context_t oc,
omap_root_t &omap_root,
    OMapNode::mutation_result_t mresult);
public:
explicit BtreeOMapManager(TransactionManager &tm);
initialize_omap_ret initialize_omap(Transaction &t, laddr_t hint) final;
omap_get_value_ret omap_get_value(
const omap_root_t &omap_root,
Transaction &t,
const std::string &key) final;
omap_set_key_ret omap_set_key(
omap_root_t &omap_root,
Transaction &t,
const std::string &key, const ceph::bufferlist &value) final;
omap_set_keys_ret omap_set_keys(
omap_root_t &omap_root,
Transaction &t,
std::map<std::string, ceph::bufferlist>&& keys) final;
omap_rm_key_ret omap_rm_key(
omap_root_t &omap_root,
Transaction &t,
const std::string &key) final;
omap_rm_key_range_ret omap_rm_key_range(
omap_root_t &omap_root,
Transaction &t,
const std::string &first,
const std::string &last,
omap_list_config_t config) final;
omap_list_ret omap_list(
const omap_root_t &omap_root,
Transaction &t,
const std::optional<std::string> &first,
const std::optional<std::string> &last,
omap_list_config_t config = omap_list_config_t()) final;
omap_clear_ret omap_clear(
omap_root_t &omap_root,
Transaction &t) final;
};
using BtreeOMapManagerRef = std::unique_ptr<BtreeOMapManager>;
}
| 3,082 | 26.526786 | 77 | h |
null | ceph-main/src/crimson/os/seastore/omap_manager/btree/omap_btree_node.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string>
#include <vector>
//#include <boost/iterator/counting_iterator.hpp>
#include "crimson/common/log.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/omap_manager.h"
#include "crimson/os/seastore/omap_manager/btree/omap_types.h"
namespace crimson::os::seastore::omap_manager{
struct omap_context_t {
TransactionManager &tm;
Transaction &t;
laddr_t hint;
};
enum class mutation_status_t : uint8_t {
SUCCESS = 0,
WAS_SPLIT = 1,
NEED_MERGE = 2,
FAIL = 3
};
struct OMapNode : LogicalCachedExtent {
using base_iertr = OMapManager::base_iertr;
using OMapNodeRef = TCachedExtentRef<OMapNode>;
struct mutation_result_t {
mutation_status_t status;
/// Only populated if WAS_SPLIT, indicates the newly created left and right nodes
/// from splitting the target entry during insertion.
std::optional<std::tuple<OMapNodeRef, OMapNodeRef, std::string>> split_tuple;
    /// Only populated if NEED_MERGE, indicates which entry needs to be merged in the upper layer.
std::optional<OMapNodeRef> need_merge;
mutation_result_t(mutation_status_t s, std::optional<std::tuple<OMapNodeRef,
OMapNodeRef, std::string>> tuple, std::optional<OMapNodeRef> n_merge)
: status(s),
split_tuple(tuple),
need_merge(n_merge) {}
};
OMapNode(ceph::bufferptr &&ptr) : LogicalCachedExtent(std::move(ptr)) {}
OMapNode(const OMapNode &other)
: LogicalCachedExtent(other) {}
using get_value_iertr = base_iertr;
using get_value_ret = OMapManager::omap_get_value_ret;
virtual get_value_ret get_value(
omap_context_t oc,
const std::string &key) = 0;
using insert_iertr = base_iertr;
using insert_ret = insert_iertr::future<mutation_result_t>;
virtual insert_ret insert(
omap_context_t oc,
const std::string &key,
const ceph::bufferlist &value) = 0;
using rm_key_iertr = base_iertr;
using rm_key_ret = rm_key_iertr::future<mutation_result_t>;
virtual rm_key_ret rm_key(
omap_context_t oc,
const std::string &key) = 0;
using omap_list_config_t = OMapManager::omap_list_config_t;
using list_iertr = base_iertr;
using list_bare_ret = OMapManager::omap_list_bare_ret;
using list_ret = OMapManager::omap_list_ret;
virtual list_ret list(
omap_context_t oc,
const std::optional<std::string> &first,
const std::optional<std::string> &last,
omap_list_config_t config) = 0;
using clear_iertr = base_iertr;
using clear_ret = clear_iertr::future<>;
virtual clear_ret clear(omap_context_t oc) = 0;
using full_merge_iertr = base_iertr;
using full_merge_ret = full_merge_iertr::future<OMapNodeRef>;
virtual full_merge_ret make_full_merge(
omap_context_t oc,
OMapNodeRef right) = 0;
using make_balanced_iertr = base_iertr;
using make_balanced_ret = make_balanced_iertr::future
<std::tuple<OMapNodeRef, OMapNodeRef, std::string>>;
virtual make_balanced_ret make_balanced(
omap_context_t oc,
OMapNodeRef _right) = 0;
virtual omap_node_meta_t get_node_meta() const = 0;
virtual bool extent_will_overflow(
size_t ksize,
std::optional<size_t> vsize) const = 0;
virtual bool can_merge(OMapNodeRef right) const = 0;
virtual bool extent_is_below_min() const = 0;
virtual uint32_t get_node_size() = 0;
virtual ~OMapNode() = default;
};
using OMapNodeRef = OMapNode::OMapNodeRef;
using omap_load_extent_iertr = OMapNode::base_iertr;
omap_load_extent_iertr::future<OMapNodeRef>
omap_load_extent(omap_context_t oc, laddr_t laddr, depth_t depth);
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::omap_manager::OMapNode> : fmt::ostream_formatter {};
#endif
| 3,874 | 30.504065 | 109 | h |
null | ceph-main/src/crimson/os/seastore/omap_manager/btree/omap_btree_node_impl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string.h>
#include "include/buffer.h"
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/omap_manager.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/omap_manager/btree/string_kv_node_layout.h"
#include "crimson/os/seastore/omap_manager/btree/omap_types.h"
#include "crimson/os/seastore/omap_manager/btree/omap_btree_node.h"
namespace crimson::os::seastore::omap_manager {
/**
* OMapInnerNode
*
* Abstracts operations on and layout of internal nodes for the
* omap Tree.
*
* Layout (4k):
* num_entries: meta : keys : values :
*/
struct OMapInnerNode
: OMapNode,
StringKVInnerNodeLayout {
using OMapInnerNodeRef = TCachedExtentRef<OMapInnerNode>;
using internal_iterator_t = const_iterator;
template <typename... T>
OMapInnerNode(T&&... t) :
OMapNode(std::forward<T>(t)...),
StringKVInnerNodeLayout(get_bptr().c_str()) {}
omap_node_meta_t get_node_meta() const final { return get_meta(); }
bool extent_will_overflow(size_t ksize, std::optional<size_t> vsize) const {
return is_overflow(ksize);
}
bool can_merge(OMapNodeRef right) const {
return !is_overflow(*right->cast<OMapInnerNode>());
}
bool extent_is_below_min() const { return below_min(); }
uint32_t get_node_size() { return get_size(); }
CachedExtentRef duplicate_for_write(Transaction&) final {
assert(delta_buffer.empty());
return CachedExtentRef(new OMapInnerNode(*this));
}
delta_inner_buffer_t delta_buffer;
delta_inner_buffer_t *maybe_get_delta_buffer() {
return is_mutation_pending() ? &delta_buffer : nullptr;
}
get_value_ret get_value(omap_context_t oc, const std::string &key) final;
insert_ret insert(
omap_context_t oc,
const std::string &key,
const ceph::bufferlist &value) final;
rm_key_ret rm_key(
omap_context_t oc,
const std::string &key) final;
list_ret list(
omap_context_t oc,
const std::optional<std::string> &first,
const std::optional<std::string> &last,
omap_list_config_t config) final;
clear_ret clear(omap_context_t oc) final;
using split_children_iertr = base_iertr;
using split_children_ret = split_children_iertr::future
<std::tuple<OMapInnerNodeRef, OMapInnerNodeRef, std::string>>;
split_children_ret make_split_children(omap_context_t oc);
full_merge_ret make_full_merge(
omap_context_t oc, OMapNodeRef right) final;
make_balanced_ret make_balanced(
omap_context_t oc, OMapNodeRef right) final;
using make_split_insert_iertr = base_iertr;
using make_split_insert_ret = make_split_insert_iertr::future<mutation_result_t>;
make_split_insert_ret make_split_insert(
omap_context_t oc, internal_iterator_t iter,
std::string key, laddr_t laddr);
using merge_entry_iertr = base_iertr;
using merge_entry_ret = merge_entry_iertr::future<mutation_result_t>;
merge_entry_ret merge_entry(
omap_context_t oc,
internal_iterator_t iter, OMapNodeRef entry);
using handle_split_iertr = base_iertr;
using handle_split_ret = handle_split_iertr::future<mutation_result_t>;
handle_split_ret handle_split(
omap_context_t oc, internal_iterator_t iter,
mutation_result_t mresult);
std::ostream &print_detail_l(std::ostream &out) const final;
static constexpr extent_types_t TYPE = extent_types_t::OMAP_INNER;
extent_types_t get_type() const final {
return TYPE;
}
ceph::bufferlist get_delta() final {
ceph::bufferlist bl;
if (!delta_buffer.empty()) {
encode(delta_buffer, bl);
delta_buffer.clear();
}
return bl;
}
void apply_delta(const ceph::bufferlist &bl) final {
assert(bl.length());
delta_inner_buffer_t buffer;
auto bptr = bl.cbegin();
decode(buffer, bptr);
buffer.replay(*this);
}
internal_iterator_t get_containing_child(const std::string &key);
};
using OMapInnerNodeRef = OMapInnerNode::OMapInnerNodeRef;
/**
* OMapLeafNode
*
* Abstracts operations on and layout of leaf nodes for the
* OMap Tree.
*
* Layout (4k):
* num_entries: meta : keys : values :
*/
struct OMapLeafNode
: OMapNode,
StringKVLeafNodeLayout {
using OMapLeafNodeRef = TCachedExtentRef<OMapLeafNode>;
using internal_iterator_t = const_iterator;
template <typename... T>
OMapLeafNode(T&&... t) :
OMapNode(std::forward<T>(t)...),
StringKVLeafNodeLayout(get_bptr().c_str()) {}
omap_node_meta_t get_node_meta() const final { return get_meta(); }
bool extent_will_overflow(
size_t ksize, std::optional<size_t> vsize) const {
return is_overflow(ksize, *vsize);
}
bool can_merge(OMapNodeRef right) const {
return !is_overflow(*right->cast<OMapLeafNode>());
}
bool extent_is_below_min() const { return below_min(); }
uint32_t get_node_size() { return get_size(); }
CachedExtentRef duplicate_for_write(Transaction&) final {
assert(delta_buffer.empty());
return CachedExtentRef(new OMapLeafNode(*this));
}
delta_leaf_buffer_t delta_buffer;
delta_leaf_buffer_t *maybe_get_delta_buffer() {
return is_mutation_pending() ? &delta_buffer : nullptr;
}
get_value_ret get_value(
omap_context_t oc, const std::string &key) final;
insert_ret insert(
omap_context_t oc,
const std::string &key,
const ceph::bufferlist &value) final;
rm_key_ret rm_key(
omap_context_t oc, const std::string &key) final;
list_ret list(
omap_context_t oc,
const std::optional<std::string> &first,
const std::optional<std::string> &last,
omap_list_config_t config) final;
clear_ret clear(
omap_context_t oc) final;
using split_children_iertr = base_iertr;
using split_children_ret = split_children_iertr::future
<std::tuple<OMapLeafNodeRef, OMapLeafNodeRef, std::string>>;
split_children_ret make_split_children(
omap_context_t oc);
full_merge_ret make_full_merge(
omap_context_t oc,
OMapNodeRef right) final;
make_balanced_ret make_balanced(
omap_context_t oc,
OMapNodeRef _right) final;
static constexpr extent_types_t TYPE = extent_types_t::OMAP_LEAF;
extent_types_t get_type() const final {
return TYPE;
}
ceph::bufferlist get_delta() final {
ceph::bufferlist bl;
if (!delta_buffer.empty()) {
encode(delta_buffer, bl);
delta_buffer.clear();
}
return bl;
}
void apply_delta(const ceph::bufferlist &_bl) final {
assert(_bl.length());
ceph::bufferlist bl = _bl;
bl.rebuild();
delta_leaf_buffer_t buffer;
auto bptr = bl.cbegin();
decode(buffer, bptr);
buffer.replay(*this);
}
std::ostream &print_detail_l(std::ostream &out) const final;
std::pair<internal_iterator_t, internal_iterator_t>
get_leaf_entries(std::string &key);
};
using OMapLeafNodeRef = OMapLeafNode::OMapLeafNodeRef;
std::ostream &operator<<(std::ostream &out, const omap_inner_key_t &rhs);
std::ostream &operator<<(std::ostream &out, const omap_leaf_key_t &rhs);
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::omap_manager::OMapInnerNode> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::omap_manager::OMapLeafNode> : fmt::ostream_formatter {};
#endif
| 7,350 | 28.286853 | 114 | h |
null | ceph-main/src/crimson/os/seastore/omap_manager/btree/omap_types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/seastore_types.h"
namespace crimson::os::seastore::omap_manager {
struct omap_node_meta_t {
depth_t depth = 0;
std::pair<omap_node_meta_t, omap_node_meta_t> split_into() const {
return std::make_pair(
omap_node_meta_t{depth},
omap_node_meta_t{depth});
}
static omap_node_meta_t merge_from(
const omap_node_meta_t &lhs, const omap_node_meta_t &rhs) {
assert(lhs.depth == rhs.depth);
return omap_node_meta_t{lhs.depth};
}
static std::pair<omap_node_meta_t, omap_node_meta_t>
rebalance(const omap_node_meta_t &lhs, const omap_node_meta_t &rhs) {
assert(lhs.depth == rhs.depth);
return std::make_pair(
omap_node_meta_t{lhs.depth},
omap_node_meta_t{lhs.depth});
}
};
struct omap_node_meta_le_t {
depth_le_t depth = init_depth_le(0);
omap_node_meta_le_t() = default;
omap_node_meta_le_t(const omap_node_meta_le_t &) = default;
explicit omap_node_meta_le_t(const omap_node_meta_t &val)
: depth(init_depth_le(val.depth)) {}
operator omap_node_meta_t() const {
return omap_node_meta_t{ depth };
}
};
struct omap_inner_key_t {
uint16_t key_off = 0;
uint16_t key_len = 0;
laddr_t laddr = 0;
omap_inner_key_t() = default;
omap_inner_key_t(uint16_t off, uint16_t len, laddr_t addr)
: key_off(off), key_len(len), laddr(addr) {}
inline bool operator==(const omap_inner_key_t b) const {
return key_off == b.key_off && key_len == b.key_len && laddr == b.laddr;
}
inline bool operator!=(const omap_inner_key_t b) const {
return key_off != b.key_off || key_len != b.key_len || laddr != b.laddr;
}
DENC(omap_inner_key_t, v, p) {
DENC_START(1, 1, p);
denc(v.key_off, p);
denc(v.key_len, p);
denc(v.laddr, p);
DENC_FINISH(p);
}
};
struct omap_inner_key_le_t {
ceph_le16 key_off{0};
ceph_le16 key_len{0};
laddr_le_t laddr{0};
omap_inner_key_le_t() = default;
omap_inner_key_le_t(const omap_inner_key_le_t &) = default;
explicit omap_inner_key_le_t(const omap_inner_key_t &key)
: key_off(key.key_off),
key_len(key.key_len),
laddr(key.laddr) {}
operator omap_inner_key_t() const {
return omap_inner_key_t{uint16_t(key_off), uint16_t(key_len), laddr_t(laddr)};
}
omap_inner_key_le_t& operator=(omap_inner_key_t key) {
key_off = key.key_off;
key_len = key.key_len;
laddr = laddr_le_t(key.laddr);
return *this;
}
inline bool operator==(const omap_inner_key_le_t b) const {
return key_off == b.key_off && key_len == b.key_len && laddr == b.laddr;
}
};
struct omap_leaf_key_t {
uint16_t key_off = 0;
uint16_t key_len = 0;
uint16_t val_len = 0;
omap_leaf_key_t() = default;
omap_leaf_key_t(uint16_t k_off, uint16_t k_len, uint16_t v_len)
: key_off(k_off), key_len(k_len), val_len(v_len) {}
inline bool operator==(const omap_leaf_key_t b) const {
return key_off == b.key_off && key_len == b.key_len &&
val_len == b.val_len;
}
inline bool operator!=(const omap_leaf_key_t b) const {
return key_off != b.key_off || key_len != b.key_len ||
val_len != b.val_len;
}
DENC(omap_leaf_key_t, v, p) {
DENC_START(1, 1, p);
denc(v.key_off, p);
denc(v.key_len, p);
denc(v.val_len, p);
DENC_FINISH(p);
}
};
struct omap_leaf_key_le_t {
ceph_le16 key_off{0};
ceph_le16 key_len{0};
ceph_le16 val_len{0};
omap_leaf_key_le_t() = default;
omap_leaf_key_le_t(const omap_leaf_key_le_t &) = default;
explicit omap_leaf_key_le_t(const omap_leaf_key_t &key)
: key_off(key.key_off),
key_len(key.key_len),
val_len(key.val_len) {}
operator omap_leaf_key_t() const {
return omap_leaf_key_t{uint16_t(key_off), uint16_t(key_len),
uint16_t(val_len)};
}
omap_leaf_key_le_t& operator=(omap_leaf_key_t key) {
key_off = key.key_off;
key_len = key.key_len;
val_len = key.val_len;
return *this;
}
inline bool operator==(const omap_leaf_key_le_t b) const {
return key_off == b.key_off && key_len == b.key_len &&
val_len == b.val_len;
}
};
}
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::omap_manager::omap_inner_key_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::omap_manager::omap_leaf_key_t)
| 4,373 | 26.683544 | 82 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/fltree_onode_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/onode_manager.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/value.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/tree.h"
namespace crimson::os::seastore::onode {
struct FLTreeOnode final : Onode, Value {
static constexpr tree_conf_t TREE_CONF = {
value_magic_t::ONODE,
256, // max_ns_size
                // same as the option osd_max_object_namespace_len
2048, // max_oid_size
                // same as the option osd_max_object_name_len
1200, // max_value_payload_size
// see crimson::os::seastore::onode_layout_t
8192, // internal_node_size
// see the formula in validate_tree_config
16384 // leaf_node_size
// see the formula in validate_tree_config
};
enum class status_t {
STABLE,
MUTATED,
DELETED
} status = status_t::STABLE;
FLTreeOnode(FLTreeOnode&&) = default;
FLTreeOnode& operator=(FLTreeOnode&&) = delete;
FLTreeOnode(const FLTreeOnode&) = default;
FLTreeOnode& operator=(const FLTreeOnode&) = delete;
template <typename... T>
FLTreeOnode(uint32_t ddr, uint32_t dmr, T&&... args)
: Onode(ddr, dmr),
Value(std::forward<T>(args)...) {}
template <typename... T>
FLTreeOnode(T&&... args)
: Onode(0, 0),
Value(std::forward<T>(args)...) {}
struct Recorder : public ValueDeltaRecorder {
Recorder(bufferlist &bl) : ValueDeltaRecorder(bl) {}
value_magic_t get_header_magic() const final {
return TREE_CONF.value_magic;
}
void apply_value_delta(
ceph::bufferlist::const_iterator &bliter,
NodeExtentMutable &value,
laddr_t) final {
assert(value.get_length() == sizeof(onode_layout_t));
bliter.copy(value.get_length(), value.get_write());
}
void record_delta(NodeExtentMutable &value) {
// TODO: probably could use versioning, etc
assert(value.get_length() == sizeof(onode_layout_t));
ceph::buffer::ptr bptr(value.get_length());
memcpy(bptr.c_str(), value.get_read(), value.get_length());
get_encoded(value).append(bptr);
}
};
const onode_layout_t &get_layout() const final {
assert(status != status_t::DELETED);
return *read_payload<onode_layout_t>();
}
onode_layout_t &get_mutable_layout(Transaction &t) final {
assert(status != status_t::DELETED);
auto p = prepare_mutate_payload<
onode_layout_t,
Recorder>(t);
status = status_t::MUTATED;
return *reinterpret_cast<onode_layout_t*>(p.first.get_write());
};
void populate_recorder(Transaction &t) {
assert(status == status_t::MUTATED);
auto p = prepare_mutate_payload<
onode_layout_t,
Recorder>(t);
if (p.second) {
p.second->record_delta(
p.first);
}
status = status_t::STABLE;
}
void mark_delete() {
assert(status != status_t::DELETED);
status = status_t::DELETED;
}
laddr_t get_hint() const final {
return Value::get_hint();
}
~FLTreeOnode() final {}
};
using OnodeTree = Btree<FLTreeOnode>;
using crimson::common::get_conf;
class FLTreeOnodeManager : public crimson::os::seastore::OnodeManager {
OnodeTree tree;
uint32_t default_data_reservation = 0;
uint32_t default_metadata_offset = 0;
uint32_t default_metadata_range = 0;
public:
FLTreeOnodeManager(TransactionManager &tm) :
tree(NodeExtentManager::create_seastore(tm)),
default_data_reservation(
get_conf<uint64_t>("seastore_default_max_object_size")),
default_metadata_offset(default_data_reservation),
default_metadata_range(
get_conf<uint64_t>("seastore_default_object_metadata_reservation"))
{}
mkfs_ret mkfs(Transaction &t) {
return tree.mkfs(t);
}
contains_onode_ret contains_onode(
Transaction &trans,
const ghobject_t &hoid) final;
get_onode_ret get_onode(
Transaction &trans,
const ghobject_t &hoid) final;
get_or_create_onode_ret get_or_create_onode(
Transaction &trans,
const ghobject_t &hoid) final;
get_or_create_onodes_ret get_or_create_onodes(
Transaction &trans,
const std::vector<ghobject_t> &hoids) final;
write_dirty_ret write_dirty(
Transaction &trans,
const std::vector<OnodeRef> &onodes) final;
erase_onode_ret erase_onode(
Transaction &trans,
OnodeRef &onode) final;
list_onodes_ret list_onodes(
Transaction &trans,
const ghobject_t& start,
const ghobject_t& end,
uint64_t limit) final;
~FLTreeOnodeManager();
};
using FLTreeOnodeManagerRef = std::unique_ptr<FLTreeOnodeManager>;
}
| 4,723 | 26.952663 | 73 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/fwd.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <algorithm>
#include <cstring>
#include <limits>
#include <memory>
#include <ostream>
#include <string>
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/transaction.h"
namespace crimson::os::seastore::onode {
using eagain_iertr = trans_iertr<
crimson::errorator<crimson::ct_error::input_output_error> >;
template <class ValueT=void>
using eagain_ifuture = eagain_iertr::future<ValueT>;
using crimson::os::seastore::Transaction;
using crimson::os::seastore::TransactionRef;
using crimson::os::seastore::laddr_t;
using crimson::os::seastore::L_ADDR_MIN;
using crimson::os::seastore::L_ADDR_NULL;
using crimson::os::seastore::extent_len_t;
class DeltaRecorder;
class NodeExtent;
class NodeExtentManager;
class RootNodeTracker;
struct ValueBuilder;
using DeltaRecorderURef = std::unique_ptr<DeltaRecorder>;
using NodeExtentRef = crimson::os::seastore::TCachedExtentRef<NodeExtent>;
using NodeExtentManagerURef = std::unique_ptr<NodeExtentManager>;
using RootNodeTrackerURef = std::unique_ptr<RootNodeTracker>;
struct context_t {
NodeExtentManager& nm;
const ValueBuilder& vb;
Transaction& t;
};
class LeafNodeImpl;
class InternalNodeImpl;
class NodeImpl;
using LeafNodeImplURef = std::unique_ptr<LeafNodeImpl>;
using InternalNodeImplURef = std::unique_ptr<InternalNodeImpl>;
using NodeImplURef = std::unique_ptr<NodeImpl>;
using level_t = uint8_t;
constexpr auto MAX_LEVEL = std::numeric_limits<level_t>::max();
// a type used only to index within a node; 32 bits should be enough
using index_t = uint32_t;
constexpr auto INDEX_END = std::numeric_limits<index_t>::max();
constexpr auto INDEX_LAST = INDEX_END - 0x4;
constexpr auto INDEX_UPPER_BOUND = INDEX_END - 0x8;
inline bool is_valid_index(index_t index) { return index < INDEX_UPPER_BOUND; }
// we support up to 64 KiB tree nodes
using node_offset_t = uint16_t;
constexpr node_offset_t DISK_BLOCK_SIZE = 1u << 12;
constexpr auto MAX_NODE_SIZE =
(extent_len_t)std::numeric_limits<node_offset_t>::max() + 1;
inline bool is_valid_node_size(extent_len_t node_size) {
return (node_size > 0 &&
node_size <= MAX_NODE_SIZE &&
node_size % DISK_BLOCK_SIZE == 0);
}
using string_size_t = uint16_t;
enum class MatchKindBS : int8_t { NE = -1, EQ = 0 };
enum class MatchKindCMP : int8_t { LT = -1, EQ = 0, GT };
inline MatchKindCMP toMatchKindCMP(int value) {
if (value > 0) {
return MatchKindCMP::GT;
} else if (value < 0) {
return MatchKindCMP::LT;
} else {
return MatchKindCMP::EQ;
}
}
template <typename Type>
MatchKindCMP toMatchKindCMP(const Type& l, const Type& r) {
if (l > r) {
return MatchKindCMP::GT;
} else if (l < r) {
return MatchKindCMP::LT;
} else {
return MatchKindCMP::EQ;
}
}
inline MatchKindCMP toMatchKindCMP(
std::string_view l, std::string_view r) {
return toMatchKindCMP(l.compare(r));
}
inline MatchKindCMP reverse(MatchKindCMP cmp) {
if (cmp == MatchKindCMP::LT) {
return MatchKindCMP::GT;
} else if (cmp == MatchKindCMP::GT) {
return MatchKindCMP::LT;
} else {
return cmp;
}
}
struct tree_stats_t {
size_t size_persistent_leaf = 0;
size_t size_persistent_internal = 0;
size_t size_filled_leaf = 0;
size_t size_filled_internal = 0;
size_t size_logical_leaf = 0;
size_t size_logical_internal = 0;
size_t size_overhead_leaf = 0;
size_t size_overhead_internal = 0;
size_t size_value_leaf = 0;
size_t size_value_internal = 0;
unsigned num_kvs_leaf = 0;
unsigned num_kvs_internal = 0;
unsigned num_nodes_leaf = 0;
unsigned num_nodes_internal = 0;
unsigned height = 0;
size_t size_persistent() const {
return size_persistent_leaf + size_persistent_internal; }
size_t size_filled() const {
return size_filled_leaf + size_filled_internal; }
size_t size_logical() const {
return size_logical_leaf + size_logical_internal; }
size_t size_overhead() const {
return size_overhead_leaf + size_overhead_internal; }
size_t size_value() const {
return size_value_leaf + size_value_internal; }
unsigned num_kvs() const {
return num_kvs_leaf + num_kvs_internal; }
unsigned num_nodes() const {
return num_nodes_leaf + num_nodes_internal; }
double ratio_fullness() const {
return (double)size_filled() / size_persistent(); }
double ratio_key_compression() const {
return (double)(size_filled() - size_value()) / (size_logical() - size_value()); }
double ratio_overhead() const {
return (double)size_overhead() / size_filled(); }
double ratio_keys_leaf() const {
return (double)num_kvs_leaf / num_kvs(); }
double ratio_nodes_leaf() const {
return (double)num_nodes_leaf / num_nodes(); }
double ratio_filled_leaf() const {
return (double)size_filled_leaf / size_filled(); }
};
inline std::ostream& operator<<(std::ostream& os, const tree_stats_t& stats) {
os << "Tree stats:"
<< "\n height = " << stats.height
<< "\n num values = " << stats.num_kvs_leaf
<< "\n num nodes = " << stats.num_nodes()
<< " (leaf=" << stats.num_nodes_leaf
<< ", internal=" << stats.num_nodes_internal << ")"
<< "\n size persistent = " << stats.size_persistent() << "B"
<< "\n size filled = " << stats.size_filled() << "B"
<< " (value=" << stats.size_value_leaf << "B"
<< ", rest=" << stats.size_filled() - stats.size_value_leaf << "B)"
<< "\n size logical = " << stats.size_logical() << "B"
<< "\n size overhead = " << stats.size_overhead() << "B"
<< "\n ratio fullness = " << stats.ratio_fullness()
<< "\n ratio keys leaf = " << stats.ratio_keys_leaf()
<< "\n ratio nodes leaf = " << stats.ratio_nodes_leaf()
<< "\n ratio filled leaf = " << stats.ratio_filled_leaf()
<< "\n ratio key compression = " << stats.ratio_key_compression();
assert(stats.num_kvs_internal + 1 == stats.num_nodes());
return os;
}
template <typename PtrType>
void reset_ptr(PtrType& ptr, const char* origin_base,
const char* new_base, extent_len_t node_size) {
assert((const char*)ptr > origin_base);
assert((const char*)ptr - origin_base < (int)node_size);
ptr = reinterpret_cast<PtrType>(
(const char*)ptr - origin_base + new_base);
}
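// A minimal usage sketch for reset_ptr(), assuming two hypothetical buffers
// that stand in for the original and the copied node extent memory:
//
//   char old_buf[4096] = {};
//   char new_buf[4096];
//   std::memcpy(new_buf, old_buf, sizeof(old_buf));
//   const uint32_t* p_field =
//       reinterpret_cast<const uint32_t*>(old_buf + 128);
//   reset_ptr(p_field, old_buf, new_buf, 4096);
//   // p_field now points at offset 128 inside new_buf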
}
#if FMT_VERSION >= 90000
template<>
struct fmt::formatter<crimson::os::seastore::onode::tree_stats_t> : fmt::ostream_formatter {};
#endif
| 6,567 | 32.340102 | 94 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <compare>
#include <map>
#include <memory>
#include <ostream>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include "crimson/common/type_helpers.h"
#include "node_extent_mutable.h"
#include "stages/key_layout.h"
#include "stages/stage_types.h"
#include "super.h"
#include "value.h"
/**
* Tree example (2 levels):
*
* Root node keys: [ 3 7 ]
* values: [p1 p2 p3]
* / | \
* ------- | -------
* | | |
* V V V
* Leaf node keys: [ 1 2 3] [ 4 5 7] [ 9 11 12]
* values: [v1 v2 v3] [v4 v5 v6] [v7 v8 v9]
*
* Tree structure properties:
* - As illustrated above, the parent key is strictly equal to its left child's
* largest key;
* - If a tree is indexing multiple seastore transactions, each transaction
* will be mapped to a Super which points to a distinct root node. So the
* transactions are isolated at tree level. However, tree nodes from
* different transactions can reference the same seastore CachedExtent before
* modification;
* - The resources of the transactional tree are tracked by tree_cursor_ts held
* by users. As long as any cursor is alive, the corresponding tree hierarchy
* stays alive and tracked. See the reversed resource management sections
* below;
*/
namespace crimson::os::seastore::onode {
class LeafNode;
class InternalNode;
using layout_version_t = uint32_t;
struct node_version_t {
layout_version_t layout;
nextent_state_t state;
bool operator==(const node_version_t& rhs) const {
return (layout == rhs.layout && state == rhs.state);
}
bool operator!=(const node_version_t& rhs) const {
return !(*this == rhs);
}
};
/**
* tree_cursor_t
*
* A cursor points to a position (LeafNode and search_position_t) of the tree
* where it can find the according key and value pair. The position is updated
* by LeafNode insert/split/delete/merge internally and is kept valid. It also
* caches the key-value information for a specific node layout version.
*
* Exposes public interfaces for Btree::Cursor.
*/
class tree_cursor_t final
: public boost::intrusive_ref_counter<
tree_cursor_t, boost::thread_unsafe_counter> {
public:
~tree_cursor_t();
tree_cursor_t(const tree_cursor_t&) = delete;
tree_cursor_t(tree_cursor_t&&) = delete;
tree_cursor_t& operator=(const tree_cursor_t&) = delete;
tree_cursor_t& operator=(tree_cursor_t&&) = delete;
// public to Btree
/**
* is_end
*
* Represents one-past-the-last of all the sorted key-value
* pairs in the tree. An end cursor won't contain valid key-value
* information.
*/
bool is_end() const { return !!ref_leaf_node && position.is_end(); }
/**
* is_tracked
*
* Represents a key-value pair stored in the tree, which is always tracked
* across insert/split/erase/merge operations.
*/
bool is_tracked() const { return !!ref_leaf_node && !position.is_end(); }
/**
* is_invalid
*
* Represents an invalid cursor which was once valid and tracked by the tree
* but is now erased and untracked. A user may still hold an invalid cursor.
*/
bool is_invalid() const { return !ref_leaf_node; }
/// Returns the key view in tree if it is not an end cursor.
const key_view_t& get_key_view(value_magic_t magic) const {
assert(is_tracked());
return cache.get_key_view(magic, position);
}
/// Returns the next tree_cursor_t in the tree, which can be end if there's no next.
eagain_ifuture<Ref<tree_cursor_t>> get_next(context_t);
/// Check that this is next to prv
void assert_next_to(const tree_cursor_t&, value_magic_t) const;
/// Erases the key-value pair from tree.
template <bool FORCE_MERGE = false>
eagain_ifuture<Ref<tree_cursor_t>> erase(context_t, bool get_next);
std::strong_ordering compare_to(const tree_cursor_t&, value_magic_t) const;
// public to Value
/// Get the latest value_header_t pointer for read.
const value_header_t* read_value_header(value_magic_t magic) const {
assert(is_tracked());
return cache.get_p_value_header(magic, position);
}
/// Prepare the node extent to be mutable and recorded.
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
prepare_mutate_value_payload(context_t c) {
assert(is_tracked());
if (!is_mutated) {
is_mutated = true;
++(c.t.get_onode_tree_stats().num_updates);
}
return cache.prepare_mutate_value_payload(c, position);
}
/// Extends the size of value payload.
eagain_ifuture<> extend_value(context_t, value_size_t);
/// Trim and shrink the value payload.
eagain_ifuture<> trim_value(context_t, value_size_t);
static Ref<tree_cursor_t> get_invalid() {
Ref<tree_cursor_t> INVALID = new tree_cursor_t();
return INVALID;
}
private:
// create from insert
tree_cursor_t(Ref<LeafNode>, const search_position_t&);
// create from lookup
tree_cursor_t(Ref<LeafNode>, const search_position_t&,
const key_view_t&, const value_header_t*);
// lookup reaches the end, contains the leaf node for further inserts
tree_cursor_t(Ref<LeafNode>);
// create an invalid tree_cursor_t
tree_cursor_t() : cache{ref_leaf_node} {}
const search_position_t& get_position() const { return position; }
Ref<LeafNode> get_leaf_node() const { return ref_leaf_node; }
template <bool VALIDATE>
void update_track(Ref<LeafNode>, const search_position_t&);
void update_cache_same_node(const key_view_t&,
const value_header_t*) const;
void invalidate();
static Ref<tree_cursor_t> create_inserted(
Ref<LeafNode> node, const search_position_t& pos) {
return new tree_cursor_t(node, pos);
}
static Ref<tree_cursor_t> create_tracked(
Ref<LeafNode> node, const search_position_t& pos,
const key_view_t& key, const value_header_t* p_header) {
return new tree_cursor_t(node, pos, key, p_header);
}
static Ref<tree_cursor_t> create_end(Ref<LeafNode> node) {
return new tree_cursor_t(node);
}
/**
* Reversed resource management (tree_cursor_t)
*
* tree_cursor_t holds a reference to the LeafNode, so the LeafNode will be
* alive as long as any of its cursors is still referenced by a user.
*/
Ref<LeafNode> ref_leaf_node;
search_position_t position;
// account 1 update even if there are multiple updates to the same value
bool is_mutated = false;
/** Cache
*
* Cached memory pointers or views which may be outdated due to
* extent copy-on-write or asynchronous leaf node updates.
*/
class Cache {
public:
Cache(Ref<LeafNode>&);
void validate_is_latest(const search_position_t&) const;
void invalidate() { needs_update_all = true; }
void update_all(const node_version_t&, const key_view_t&, const value_header_t*);
const key_view_t& get_key_view(
value_magic_t magic, const search_position_t& pos) {
make_latest(magic, pos);
return *key_view;
}
const value_header_t* get_p_value_header(
value_magic_t magic, const search_position_t& pos) {
make_latest(magic, pos);
return p_value_header;
}
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
prepare_mutate_value_payload(context_t, const search_position_t&);
private:
void maybe_duplicate(const node_version_t&);
void make_latest(value_magic_t, const search_position_t&);
// metadata about how cache is valid
Ref<LeafNode>& ref_leaf_node;
bool needs_update_all = true;
node_version_t version;
// cached key value info
const char* p_node_base = nullptr;
std::optional<key_view_t> key_view;
const value_header_t* p_value_header = nullptr;
// cached data-structures to update value payload
std::optional<NodeExtentMutable> value_payload_mut;
ValueDeltaRecorder* p_value_recorder = nullptr;
};
mutable Cache cache;
friend class LeafNode;
friend class Node; // get_position(), get_leaf_node()
};
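// A minimal sketch of how the three cursor states above are expected to be
// distinguished by a caller; Btree::Cursor is the real public interface, and
// the cursor plus the value_magic_t `magic` are assumed to come from a lookup:
//
//   Ref<tree_cursor_t> cursor = ...;
//   if (cursor->is_end()) {
//     // one-past-the-last: no key-value information to read
//   } else if (cursor->is_tracked()) {
//     const key_view_t& key = cursor->get_key_view(magic);
//     const value_header_t* header = cursor->read_value_header(magic);
//     // ... read or mutate the tracked value ...
//   } else {
//     assert(cursor->is_invalid()); // erased and untracked
//   }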
/**
* Node
*
* An abstracted class for both InternalNode and LeafNode.
*
* Exposes public interfaces for Btree.
*/
class Node
: public boost::intrusive_ref_counter<
Node, boost::thread_unsafe_counter> {
public:
// public to Btree
struct search_result_t {
bool is_end() const { return p_cursor->is_end(); }
Ref<tree_cursor_t> p_cursor;
match_stat_t mstat;
MatchKindBS match() const {
assert(mstat >= MSTAT_MIN && mstat <= MSTAT_MAX);
return (mstat == MSTAT_EQ ? MatchKindBS::EQ : MatchKindBS::NE);
}
void validate_input_key(const key_hobj_t& key, value_magic_t magic) const {
#ifndef NDEBUG
if (match() == MatchKindBS::EQ) {
assert(key == p_cursor->get_key_view(magic));
} else {
assert(match() == MatchKindBS::NE);
if (p_cursor->is_tracked()) {
assert(key < p_cursor->get_key_view(magic));
} else if (p_cursor->is_end()) {
// good
} else {
assert(p_cursor->is_invalid());
ceph_abort("impossible");
}
}
#endif
}
};
virtual ~Node();
Node(const Node&) = delete;
Node(Node&&) = delete;
Node& operator=(const Node&) = delete;
Node& operator=(Node&&) = delete;
/**
* level
*
* Denotes the level (or height) of this node in the tree.
* 0 means LeafNode, a positive value means InternalNode.
*/
level_t level() const;
/**
* lookup_smallest
*
* Returns a cursor pointing to the smallest key in the sub-tree formed by
* this node.
*
* Returns an end cursor if it is an empty root node.
*/
virtual eagain_ifuture<Ref<tree_cursor_t>> lookup_smallest(context_t) = 0;
/**
* lookup_largest
*
* Returns a cursor pointing to the largest key in the sub-tree formed by
* this node.
*
* Returns an end cursor if it is an empty root node.
*/
virtual eagain_ifuture<Ref<tree_cursor_t>> lookup_largest(context_t) = 0;
/**
* lower_bound
*
* Returns a cursor pointing to the first element in the range [first, last)
* of the sub-tree which does not compare less than the input key. The
* result also denotes whether the pointed key is equal to the input key.
*
* Returns an end cursor with MatchKindBS::NE if:
* - It is an empty root node;
* - Or the input key is larger than all the keys in the sub-tree;
*/
eagain_ifuture<search_result_t> lower_bound(context_t c, const key_hobj_t& key);
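// A minimal sketch of consuming the lower_bound() result; the context and
// the key construction are assumed to happen in the caller (e.g. Btree):
//
//   return node->lower_bound(c, key
//   ).si_then([](auto result) {
//     if (result.match() == MatchKindBS::EQ) {
//       // result.p_cursor points to the exact key
//     } else if (!result.is_end()) {
//       // result.p_cursor points to the first key greater than the input
//     } else {
//       // all keys in the sub-tree compare less than the input key
//     }
//   });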
/**
* insert
*
* Try to insert a key-value pair into the sub-tree formed by this node.
*
* Returns a boolean denoting whether the insertion is successful:
* - If true, the returned cursor points to the inserted element in tree;
* - If false, the returned cursor points to the conflicting element in tree;
*/
eagain_ifuture<std::pair<Ref<tree_cursor_t>, bool>> insert(
context_t, const key_hobj_t&, value_config_t, Ref<Node>&&);
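// A minimal sketch of interpreting the insert() result; the key, the
// value_config_t and the Ref<Node> ownership handling are assumed to follow
// the calling convention used by Btree:
//
//   return node->insert(c, key, vconf, std::move(node_ref)
//   ).si_then([](auto ret) {
//     auto& [cursor, inserted] = ret;
//     if (inserted) {
//       // cursor points to the newly inserted key-value pair
//     } else {
//       // cursor points to the pre-existing, conflicting key
//     }
//   });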
/**
* erase
*
* Removes a key-value pair from the sub-tree formed by this node.
*
* Returns the number of erased key-value pairs (0 or 1).
*/
eagain_ifuture<std::size_t> erase(context_t, const key_hobj_t&, Ref<Node>&&);
/// Recursively collects the statistics of the sub-tree formed by this node
eagain_ifuture<tree_stats_t> get_tree_stats(context_t);
/// Returns an ostream containing a dump of all the elements in the node.
std::ostream& dump(std::ostream&) const;
/// Returns an ostream containing a one-line summary of this node.
std::ostream& dump_brief(std::ostream&) const;
/// Print the node name
const std::string& get_name() const;
/// Initializes the tree by allocating an empty root node.
static eagain_ifuture<> mkfs(context_t, RootNodeTracker&);
/// Loads the tree root. The tree must be initialized.
static eagain_ifuture<Ref<Node>> load_root(context_t, RootNodeTracker&);
// Only for unit test purposes.
void test_make_destructable(context_t, NodeExtentMutable&, Super::URef&&);
virtual eagain_ifuture<> test_clone_root(context_t, RootNodeTracker&) const = 0;
protected:
virtual eagain_ifuture<> test_clone_non_root(context_t, Ref<InternalNode>) const {
ceph_abort("impossible path");
}
virtual eagain_ifuture<search_result_t> lower_bound_tracked(
context_t, const key_hobj_t&, MatchHistory&) = 0;
virtual eagain_ifuture<> do_get_tree_stats(context_t, tree_stats_t&) = 0;
virtual bool is_tracking() const = 0;
virtual void track_merge(Ref<Node>, match_stage_t, search_position_t&) = 0;
protected:
Node(NodeImplURef&&);
bool is_tracked() const {
assert(!(super && _parent_info.has_value()));
return (super || _parent_info.has_value());
}
bool is_root() const {
assert(is_tracked());
return !_parent_info.has_value();
}
// as root
void make_root(context_t c, Super::URef&& _super);
void make_root_new(context_t c, Super::URef&& _super) {
assert(_super->get_root_laddr() == L_ADDR_NULL);
make_root(c, std::move(_super));
}
void make_root_from(context_t c, Super::URef&& _super, laddr_t from_addr) {
assert(_super->get_root_laddr() == from_addr);
make_root(c, std::move(_super));
}
void as_root(Super::URef&& _super);
eagain_ifuture<> upgrade_root(context_t, laddr_t);
Super::URef deref_super();
// as child/non-root
template <bool VALIDATE = true>
void as_child(const search_position_t&, Ref<InternalNode>);
struct parent_info_t {
search_position_t position;
Ref<InternalNode> ptr;
};
const parent_info_t& parent_info() const { return *_parent_info; }
Ref<InternalNode> deref_parent();
eagain_ifuture<> apply_split_to_parent(context_t, Ref<Node>&&, Ref<Node>&&, bool);
eagain_ifuture<Ref<tree_cursor_t>> get_next_cursor_from_parent(context_t);
template <bool FORCE_MERGE = false>
eagain_ifuture<> try_merge_adjacent(context_t, bool, Ref<Node>&&);
eagain_ifuture<> erase_node(context_t, Ref<Node>&&);
template <bool FORCE_MERGE = false>
eagain_ifuture<> fix_parent_index(context_t, Ref<Node>&&, bool);
eagain_ifuture<NodeExtentMutable> rebuild_extent(context_t);
eagain_ifuture<> retire(context_t, Ref<Node>&&);
void make_tail(context_t);
private:
/**
* Reversed resource management (Node)
*
* Root Node holds a reference to its parent Super class, so its parent
* will be alive as long as this root node is alive.
*
* A non-root Node holds a reference to its parent Node, so its parent will
* be alive as long as any of its children is alive.
*/
// as root
Super::URef super;
// as child/non-root
std::optional<parent_info_t> _parent_info;
private:
static eagain_ifuture<Ref<Node>> load(context_t, laddr_t, bool expect_is_level_tail);
NodeImplURef impl;
friend class InternalNode;
};
inline std::ostream& operator<<(std::ostream& os, const Node& node) {
return node.dump_brief(os);
}
/**
* InternalNode
*
* A concrete implementation of Node class that represents an internal tree
* node. Its level is always positive and its values are logical block
* addresses to its child nodes. An internal node cannot be empty.
*/
class InternalNode final : public Node {
public:
// public to Node
InternalNode(InternalNodeImpl*, NodeImplURef&&);
~InternalNode() override { assert(tracked_child_nodes.empty()); }
InternalNode(const InternalNode&) = delete;
InternalNode(InternalNode&&) = delete;
InternalNode& operator=(const InternalNode&) = delete;
InternalNode& operator=(InternalNode&&) = delete;
eagain_ifuture<Ref<tree_cursor_t>> get_next_cursor(context_t, const search_position_t&);
eagain_ifuture<> apply_child_split(context_t, Ref<Node>&& left, Ref<Node>&& right, bool);
template <bool VALIDATE>
void do_track_child(Node& child) {
if constexpr (VALIDATE) {
validate_child(child);
}
auto& child_pos = child.parent_info().position;
assert(tracked_child_nodes.find(child_pos) == tracked_child_nodes.end());
tracked_child_nodes[child_pos] = &child;
}
void do_untrack_child(const Node& child) {
assert(check_is_tracking(child));
auto& child_pos = child.parent_info().position;
[[maybe_unused]] auto removed = tracked_child_nodes.erase(child_pos);
assert(removed);
}
bool check_is_tracking(const Node& child) const {
auto& child_pos = child.parent_info().position;
auto found = tracked_child_nodes.find(child_pos);
if (found != tracked_child_nodes.end() && found->second == &child) {
assert(child.parent_info().ptr == this);
return true;
} else {
return false;
}
}
eagain_ifuture<std::pair<Ref<Node>, Ref<Node>>> get_child_peers(
context_t, const search_position_t&);
eagain_ifuture<> erase_child(context_t, Ref<Node>&&);
template <bool FORCE_MERGE = false>
eagain_ifuture<> fix_index(context_t, Ref<Node>&&, bool);
template <bool FORCE_MERGE = false>
eagain_ifuture<> apply_children_merge(
context_t, Ref<Node>&& left, laddr_t, Ref<Node>&& right, bool update_index);
void validate_child_tracked(const Node& child) const {
validate_child(child);
assert(tracked_child_nodes.find(child.parent_info().position) !=
tracked_child_nodes.end());
assert(tracked_child_nodes.find(child.parent_info().position)->second == &child);
}
void validate_child_inconsistent(const Node& child) const;
void validate_tracked_children() const {
#ifndef NDEBUG
for (auto& kv : tracked_child_nodes) {
assert(kv.first == kv.second->parent_info().position);
validate_child(*kv.second);
}
#endif
}
void track_make_tail(const search_position_t&);
static eagain_ifuture<Ref<InternalNode>> allocate_root(
context_t, laddr_t, level_t, laddr_t, Super::URef&&);
protected:
eagain_ifuture<Ref<tree_cursor_t>> lookup_smallest(context_t) override;
eagain_ifuture<Ref<tree_cursor_t>> lookup_largest(context_t) override;
eagain_ifuture<search_result_t> lower_bound_tracked(
context_t, const key_hobj_t&, MatchHistory&) override;
eagain_ifuture<> do_get_tree_stats(context_t, tree_stats_t&) override;
bool is_tracking() const override {
return !tracked_child_nodes.empty();
}
void track_merge(Ref<Node>, match_stage_t, search_position_t&) override;
eagain_ifuture<> test_clone_root(context_t, RootNodeTracker&) const override;
private:
eagain_ifuture<> try_downgrade_root(context_t, Ref<Node>&&);
eagain_ifuture<Ref<InternalNode>> insert_or_split(
context_t, const search_position_t&, const key_view_t&, Ref<Node>,
Ref<Node> outdated_child=nullptr);
// XXX: extract a common tracker for InternalNode to track Node,
// and LeafNode to track tree_cursor_t.
eagain_ifuture<Ref<Node>> get_or_track_child(context_t, const search_position_t&, laddr_t);
template <bool VALIDATE = true>
void track_insert(
const search_position_t&, match_stage_t, Ref<Node>, Ref<Node> nxt_child = nullptr);
void replace_track(Ref<Node> new_child, Ref<Node> old_child, bool);
void track_split(const search_position_t&, Ref<InternalNode>);
template <bool VALIDATE = true>
void track_erase(const search_position_t&, match_stage_t);
void validate_child(const Node& child) const;
struct fresh_node_t {
Ref<InternalNode> node;
NodeExtentMutable mut;
std::pair<Ref<Node>, NodeExtentMutable> make_pair() {
return std::make_pair(Ref<Node>(node), mut);
}
};
static eagain_ifuture<fresh_node_t> allocate(context_t, laddr_t, field_type_t, bool, level_t);
private:
/**
* Reversed resource management (InternalNode)
*
* InternalNode keeps track of its child nodes which are still alive in
* memory, and their positions will be updated throughout
* insert/split/delete/merge operations of this node.
*/
// XXX: leverage intrusive data structure to control memory overhead
std::map<search_position_t, Node*> tracked_child_nodes;
InternalNodeImpl* impl;
};
/**
* LeafNode
*
* A concrete implementation of Node class that represents a leaf tree node.
* Its level is always 0. A leaf node can only be empty if it is the root.
*/
class LeafNode final : public Node {
public:
// public to tree_cursor_t
~LeafNode() override { assert(tracked_cursors.empty()); }
LeafNode(const LeafNode&) = delete;
LeafNode(LeafNode&&) = delete;
LeafNode& operator=(const LeafNode&) = delete;
LeafNode& operator=(LeafNode&&) = delete;
bool is_level_tail() const;
node_version_t get_version() const;
const char* read() const;
extent_len_t get_node_size() const;
std::tuple<key_view_t, const value_header_t*> get_kv(const search_position_t&) const;
eagain_ifuture<Ref<tree_cursor_t>> get_next_cursor(context_t, const search_position_t&);
/**
* erase
*
* Removes a key-value pair from the position.
*
* If get_next is true, returns the cursor pointing to the next key-value
* pair that followed the erased element, which can be nullptr if it is the end.
*/
template <bool FORCE_MERGE>
eagain_ifuture<Ref<tree_cursor_t>> erase(
context_t, const search_position_t&, bool get_next);
template <bool VALIDATE>
void do_track_cursor(tree_cursor_t& cursor) {
if constexpr (VALIDATE) {
validate_cursor(cursor);
}
auto& cursor_pos = cursor.get_position();
assert(tracked_cursors.find(cursor_pos) == tracked_cursors.end());
tracked_cursors.emplace(cursor_pos, &cursor);
}
void do_untrack_cursor(const tree_cursor_t& cursor) {
validate_cursor(cursor);
auto& cursor_pos = cursor.get_position();
assert(check_is_tracking(cursor));
[[maybe_unused]] auto removed = tracked_cursors.erase(cursor_pos);
assert(removed);
}
bool check_is_tracking(const tree_cursor_t& cursor) const {
auto& cursor_pos = cursor.get_position();
auto found = tracked_cursors.find(cursor_pos);
if (found != tracked_cursors.end() && found->second == &cursor) {
assert(cursor.ref_leaf_node == this);
return true;
} else {
return false;
}
}
eagain_ifuture<> extend_value(context_t, const search_position_t&, value_size_t);
eagain_ifuture<> trim_value(context_t, const search_position_t&, value_size_t);
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
prepare_mutate_value_payload(context_t);
protected:
eagain_ifuture<Ref<tree_cursor_t>> lookup_smallest(context_t) override;
eagain_ifuture<Ref<tree_cursor_t>> lookup_largest(context_t) override;
eagain_ifuture<search_result_t> lower_bound_tracked(
context_t, const key_hobj_t&, MatchHistory&) override;
eagain_ifuture<> do_get_tree_stats(context_t, tree_stats_t&) override;
bool is_tracking() const override {
return !tracked_cursors.empty();
}
void track_merge(Ref<Node>, match_stage_t, search_position_t&) override;
eagain_ifuture<> test_clone_root(context_t, RootNodeTracker&) const override;
private:
LeafNode(LeafNodeImpl*, NodeImplURef&&);
eagain_ifuture<Ref<tree_cursor_t>> insert_value(
context_t, const key_hobj_t&, value_config_t,
const search_position_t&, const MatchHistory&,
match_stat_t mstat);
static eagain_ifuture<Ref<LeafNode>> allocate_root(context_t, RootNodeTracker&);
friend class Node;
private:
// XXX: extract a common tracker for InternalNode to track Node,
// and LeafNode to track tree_cursor_t.
Ref<tree_cursor_t> get_or_track_cursor(
const search_position_t&, const key_view_t&, const value_header_t*);
Ref<tree_cursor_t> track_insert(
const search_position_t&, match_stage_t, const value_header_t*);
void track_split(const search_position_t&, Ref<LeafNode>);
void track_erase(const search_position_t&, match_stage_t);
void validate_tracked_cursors() const {
#ifndef NDEBUG
for (auto& kv : tracked_cursors) {
assert(kv.first == kv.second->get_position());
validate_cursor(*kv.second);
}
#endif
}
void validate_cursor(const tree_cursor_t& cursor) const;
// invalidate p_value pointers in tree_cursor_t
void on_layout_change() { ++layout_version; }
struct fresh_node_t {
Ref<LeafNode> node;
NodeExtentMutable mut;
std::pair<Ref<Node>, NodeExtentMutable> make_pair() {
return std::make_pair(Ref<Node>(node), mut);
}
};
static eagain_ifuture<fresh_node_t> allocate(context_t, laddr_t, field_type_t, bool);
private:
/**
* Reversed resource management (LeafNode)
*
* LeafNode keeps track of the referencing cursors which are still alive in
* memory, and their positions will be updated throughout
* insert/split/delete/merge operations of this node.
*/
// XXX: leverage intrusive data structure to control memory overhead
std::map<search_position_t, tree_cursor_t*> tracked_cursors;
LeafNodeImpl* impl;
layout_version_t layout_version = 0;
};
}
| 24,933 | 32.513441 | 96 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_delta_recorder.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "include/buffer.h"
#include "node_types.h"
#include "value.h"
namespace crimson::os::seastore::onode {
/**
* DeltaRecorder
*
* An abstracted class that encapsulates the different implementations used
* to apply deltas to a specific node layout.
*/
class DeltaRecorder {
public:
virtual ~DeltaRecorder() {
/* May be non-empty if the transaction is abandoned without
* being submitted -- conflicts are a particularly common
* example (denoted generally by returning crimson::ct_error::eagain).
*/
}
bool is_empty() const {
return encoded.length() == 0;
}
ceph::bufferlist get_delta() {
return std::move(encoded);
}
ValueDeltaRecorder* get_value_recorder() const {
assert(value_recorder);
return value_recorder.get();
}
virtual node_type_t node_type() const = 0;
virtual field_type_t field_type() const = 0;
virtual void apply_delta(ceph::bufferlist::const_iterator&,
NodeExtentMutable&,
const NodeExtent&) = 0;
protected:
DeltaRecorder() = default;
DeltaRecorder(const ValueBuilder& vb)
: value_recorder{vb.build_value_recorder(encoded)} {}
ceph::bufferlist encoded;
std::unique_ptr<ValueDeltaRecorder> value_recorder;
};
}
| 1,368 | 23.446429 | 78 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_extent_accessor.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/logging.h"
#include "node_extent_manager.h"
#include "node_delta_recorder.h"
#include "node_layout_replayable.h"
#include "value.h"
#ifndef NDEBUG
#include "node_extent_manager/test_replay.h"
#endif
namespace crimson::os::seastore::onode {
/**
* DeltaRecorderT
*
* Responsible for encoding and decoding deltas, and for applying them for a
* specific node layout.
*/
template <typename FieldType, node_type_t NODE_TYPE>
class DeltaRecorderT final: public DeltaRecorder {
public:
using layout_t = NodeLayoutReplayableT<FieldType, NODE_TYPE>;
using node_stage_t = typename layout_t::node_stage_t;
using position_t = typename layout_t::position_t;
using StagedIterator = typename layout_t::StagedIterator;
using value_input_t = typename layout_t::value_input_t;
static constexpr auto FIELD_TYPE = layout_t::FIELD_TYPE;
~DeltaRecorderT() override = default;
template <KeyT KT>
void encode_insert(
const full_key_t<KT>& key,
const value_input_t& value,
const position_t& insert_pos,
const match_stage_t& insert_stage,
const node_offset_t& insert_size) {
ceph::encode(node_delta_op_t::INSERT, encoded);
encode_key(key, encoded);
encode_value(value, encoded);
insert_pos.encode(encoded);
ceph::encode(insert_stage, encoded);
ceph::encode(insert_size, encoded);
}
void encode_split(
const StagedIterator& split_at,
const char* p_node_start) {
ceph::encode(node_delta_op_t::SPLIT, encoded);
split_at.encode(p_node_start, encoded);
}
template <KeyT KT>
void encode_split_insert(
const StagedIterator& split_at,
const full_key_t<KT>& key,
const value_input_t& value,
const position_t& insert_pos,
const match_stage_t& insert_stage,
const node_offset_t& insert_size,
const char* p_node_start) {
ceph::encode(node_delta_op_t::SPLIT_INSERT, encoded);
split_at.encode(p_node_start, encoded);
encode_key(key, encoded);
encode_value(value, encoded);
insert_pos.encode(encoded);
ceph::encode(insert_stage, encoded);
ceph::encode(insert_size, encoded);
}
void encode_update_child_addr(
const laddr_t new_addr,
const laddr_packed_t* p_addr,
const char* p_node_start,
extent_len_t node_size) {
ceph::encode(node_delta_op_t::UPDATE_CHILD_ADDR, encoded);
ceph::encode(new_addr, encoded);
int node_offset = reinterpret_cast<const char*>(p_addr) - p_node_start;
assert(node_offset > 0 && node_offset < (int)node_size);
ceph::encode(static_cast<node_offset_t>(node_offset), encoded);
}
void encode_erase(
const position_t& erase_pos) {
ceph::encode(node_delta_op_t::ERASE, encoded);
erase_pos.encode(encoded);
}
void encode_make_tail() {
ceph::encode(node_delta_op_t::MAKE_TAIL, encoded);
}
static DeltaRecorderURef create_for_encode(const ValueBuilder& v_builder) {
return std::unique_ptr<DeltaRecorder>(new DeltaRecorderT(v_builder));
}
static DeltaRecorderURef create_for_replay() {
return std::unique_ptr<DeltaRecorder>(new DeltaRecorderT());
}
protected:
DeltaRecorderT() : DeltaRecorder() {}
DeltaRecorderT(const ValueBuilder& vb) : DeltaRecorder(vb) {}
node_type_t node_type() const override { return NODE_TYPE; }
field_type_t field_type() const override { return FIELD_TYPE; }
void apply_delta(ceph::bufferlist::const_iterator& delta,
NodeExtentMutable& mut,
const NodeExtent& node) override {
LOG_PREFIX(OTree::Extent::Replay);
assert(is_empty());
node_stage_t stage(reinterpret_cast<const FieldType*>(mut.get_read()),
mut.get_length());
node_delta_op_t op;
try {
ceph::decode(op, delta);
switch (op) {
case node_delta_op_t::INSERT: {
SUBDEBUG(seastore_onode, "decoding INSERT ...");
auto key = key_hobj_t::decode(delta);
auto value = decode_value(delta);
auto insert_pos = position_t::decode(delta);
match_stage_t insert_stage;
ceph::decode(insert_stage, delta);
node_offset_t insert_size;
ceph::decode(insert_size, delta);
SUBDEBUG(seastore_onode,
"apply {}, {}, insert_pos({}), insert_stage={}, "
"insert_size={}B ...",
key, value, insert_pos, insert_stage, insert_size);
layout_t::template insert<KeyT::HOBJ>(
mut, stage, key, value, insert_pos, insert_stage, insert_size);
break;
}
case node_delta_op_t::SPLIT: {
SUBDEBUG(seastore_onode, "decoding SPLIT ...");
auto split_at = StagedIterator::decode(
mut.get_read(), mut.get_length(), delta);
SUBDEBUG(seastore_onode, "apply split_at={} ...", split_at);
layout_t::split(mut, stage, split_at);
break;
}
case node_delta_op_t::SPLIT_INSERT: {
SUBDEBUG(seastore_onode, "decoding SPLIT_INSERT ...");
auto split_at = StagedIterator::decode(
mut.get_read(), mut.get_length(), delta);
auto key = key_hobj_t::decode(delta);
auto value = decode_value(delta);
auto insert_pos = position_t::decode(delta);
match_stage_t insert_stage;
ceph::decode(insert_stage, delta);
node_offset_t insert_size;
ceph::decode(insert_size, delta);
SUBDEBUG(seastore_onode,
"apply split_at={}, {}, {}, insert_pos({}), insert_stage={}, "
"insert_size={}B ...",
split_at, key, value, insert_pos, insert_stage, insert_size);
layout_t::template split_insert<KeyT::HOBJ>(
mut, stage, split_at, key, value, insert_pos, insert_stage, insert_size);
break;
}
case node_delta_op_t::UPDATE_CHILD_ADDR: {
SUBDEBUG(seastore_onode, "decoding UPDATE_CHILD_ADDR ...");
laddr_t new_addr;
ceph::decode(new_addr, delta);
node_offset_t update_offset;
ceph::decode(update_offset, delta);
auto p_addr = reinterpret_cast<laddr_packed_t*>(
mut.get_write() + update_offset);
SUBDEBUG(seastore_onode,
"apply {:#x} to offset {:#x} ...",
new_addr, update_offset);
layout_t::update_child_addr(mut, new_addr, p_addr);
break;
}
case node_delta_op_t::ERASE: {
SUBDEBUG(seastore_onode, "decoding ERASE ...");
auto erase_pos = position_t::decode(delta);
SUBDEBUG(seastore_onode, "apply erase_pos({}) ...", erase_pos);
layout_t::erase(mut, stage, erase_pos);
break;
}
case node_delta_op_t::MAKE_TAIL: {
SUBDEBUG(seastore_onode, "decoded MAKE_TAIL, apply ...");
layout_t::make_tail(mut, stage);
break;
}
case node_delta_op_t::SUBOP_UPDATE_VALUE: {
SUBDEBUG(seastore_onode, "decoding SUBOP_UPDATE_VALUE ...");
node_offset_t value_header_offset;
ceph::decode(value_header_offset, delta);
auto p_header = mut.get_read() + value_header_offset;
auto p_header_ = reinterpret_cast<const value_header_t*>(p_header);
SUBDEBUG(seastore_onode, "update {} at {:#x} ...", *p_header_, value_header_offset);
auto payload_mut = p_header_->get_payload_mutable(mut);
auto value_addr = node.get_laddr() + payload_mut.get_node_offset();
get_value_replayer(p_header_->magic)->apply_value_delta(
delta, payload_mut, value_addr);
break;
}
default:
SUBERROR(seastore_onode,
"got unknown op {} when replay {}",
op, node);
ceph_abort("fatal error");
}
} catch (buffer::error& e) {
SUBERROR(seastore_onode,
"got decode error {} when replay {}",
e.what(), node);
ceph_abort("fatal error");
}
}
private:
ValueDeltaRecorder* get_value_replayer(value_magic_t magic) {
// The replay procedure is independent of Btree and happens at a lower level
// in seastore. There is no ValueBuilder, so the recorder needs to build the
// ValueDeltaRecorder by itself.
if (value_replayer) {
if (value_replayer->get_header_magic() != magic) {
ceph_abort_msgf("OTree::Extent::Replay: value magic mismatch %x != %x",
value_replayer->get_header_magic(), magic);
}
} else {
value_replayer = build_value_recorder_by_type(encoded, magic);
if (!value_replayer) {
ceph_abort_msgf("OTree::Extent::Replay: got unexpected value magic = %x",
magic);
}
}
return value_replayer.get();
}
void encode_value(const value_input_t& value, ceph::bufferlist& encoded) const {
if constexpr (std::is_same_v<value_input_t, laddr_t>) {
// NODE_TYPE == node_type_t::INTERNAL
ceph::encode(value, encoded);
} else if constexpr (std::is_same_v<value_input_t, value_config_t>) {
// NODE_TYPE == node_type_t::LEAF
value.encode(encoded);
} else {
ceph_abort("impossible path");
}
}
value_input_t decode_value(ceph::bufferlist::const_iterator& delta) const {
if constexpr (std::is_same_v<value_input_t, laddr_t>) {
// NODE_TYPE == node_type_t::INTERNAL
laddr_t value;
ceph::decode(value, delta);
return value;
} else if constexpr (std::is_same_v<value_input_t, value_config_t>) {
// NODE_TYPE == node_type_t::LEAF
return value_config_t::decode(delta);
} else {
ceph_abort("impossible path");
}
}
std::unique_ptr<ValueDeltaRecorder> value_replayer;
};
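// A minimal sketch of the encode/replay symmetry; the mutable view, the
// source extent and the positions are assumed to be wired up by
// NodeExtentAccessorT and the transactional cache, and the replaying
// recorder is a separate instance created for replay:
//
//   // mutation side: record the logical change while mutating in place
//   recorder->encode_erase(erase_pos);
//
//   // replay side: decode the accumulated delta and re-apply it onto the
//   // extent's mutable memory
//   auto replayer = DeltaRecorderT<FieldType, NODE_TYPE>::create_for_replay();
//   ceph::bufferlist delta = recorder->get_delta();
//   auto iter = delta.cbegin();
//   replayer->apply_delta(iter, mut, node_extent);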
/**
* NodeExtentAccessorT
*
* This component is responsible for referencing and mutating the underlying
* NodeExtent, recording mutation parameters when needed, and applying the
* recorded modifications for a specific node layout.
*
* For possible internal states, see node_types.h.
*/
template <typename FieldType, node_type_t NODE_TYPE>
class NodeExtentAccessorT {
public:
using layout_t = NodeLayoutReplayableT<FieldType, NODE_TYPE>;
using node_stage_t = typename layout_t::node_stage_t;
using position_t = typename layout_t::position_t;
using recorder_t = DeltaRecorderT<FieldType, NODE_TYPE>;
using StagedIterator = typename layout_t::StagedIterator;
using value_input_t = typename layout_t::value_input_t;
using value_t = typename layout_t::value_t;
static constexpr auto FIELD_TYPE = layout_t::FIELD_TYPE;
NodeExtentAccessorT(NodeExtentRef extent)
: extent{extent},
node_stage{reinterpret_cast<const FieldType*>(extent->get_read()),
extent->get_length()} {
assert(is_valid_node_size(extent->get_length()));
if (extent->is_initial_pending()) {
state = nextent_state_t::FRESH;
mut.emplace(extent->get_mutable());
assert(extent->get_recorder() == nullptr);
recorder = nullptr;
} else if (extent->is_mutation_pending()) {
state = nextent_state_t::MUTATION_PENDING;
mut.emplace(extent->get_mutable());
auto p_recorder = extent->get_recorder();
assert(p_recorder != nullptr);
assert(p_recorder->node_type() == NODE_TYPE);
assert(p_recorder->field_type() == FIELD_TYPE);
recorder = static_cast<recorder_t*>(p_recorder);
} else if (!extent->is_mutable() && extent->is_valid()) {
state = nextent_state_t::READ_ONLY;
// mut is empty
assert(extent->get_recorder() == nullptr ||
extent->get_recorder()->is_empty());
recorder = nullptr;
} else {
// extent is invalid or retired
ceph_abort("impossible path");
}
#ifndef NDEBUG
auto ref_recorder = recorder_t::create_for_replay();
test_recorder = static_cast<recorder_t*>(ref_recorder.get());
test_extent = TestReplayExtent::create(
get_length(), std::move(ref_recorder));
#endif
}
~NodeExtentAccessorT() = default;
NodeExtentAccessorT(const NodeExtentAccessorT&) = delete;
NodeExtentAccessorT(NodeExtentAccessorT&&) = delete;
NodeExtentAccessorT& operator=(const NodeExtentAccessorT&) = delete;
NodeExtentAccessorT& operator=(NodeExtentAccessorT&&) = delete;
const node_stage_t& read() const { return node_stage; }
laddr_t get_laddr() const { return extent->get_laddr(); }
extent_len_t get_length() const {
auto len = extent->get_length();
assert(is_valid_node_size(len));
return len;
}
nextent_state_t get_state() const {
assert(!is_retired());
// we cannot rely on the underlying extent state because
// FRESH/MUTATION_PENDING can become DIRTY after transaction submission.
return state;
}
bool is_retired() const {
if (extent) {
return false;
} else {
return true;
}
}
// must be called before any mutate attempts.
// for the safety of mixed read and mutate, call before read.
void prepare_mutate(context_t c) {
assert(!is_retired());
if (state == nextent_state_t::READ_ONLY) {
assert(!extent->is_mutable());
auto ref_recorder = recorder_t::create_for_encode(c.vb);
recorder = static_cast<recorder_t*>(ref_recorder.get());
extent = extent->mutate(c, std::move(ref_recorder));
state = nextent_state_t::MUTATION_PENDING;
assert(extent->is_mutation_pending());
node_stage = node_stage_t(reinterpret_cast<const FieldType*>(extent->get_read()),
get_length());
assert(recorder == static_cast<recorder_t*>(extent->get_recorder()));
mut.emplace(extent->get_mutable());
}
assert(extent->is_mutable());
}
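// A minimal sketch of the expected call order when mutating through the
// accessor; in practice the concrete layout/Node classes drive this, and the
// insert position/stage/size below are assumed to be pre-computed:
//
//   accessor.prepare_mutate(c); // no-op unless the state is READ_ONLY
//   auto p_value = accessor.template insert_replayable<KeyT::HOBJ>(
//       key, value, insert_pos, insert_stage, insert_size);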
template <KeyT KT>
const value_t* insert_replayable(
const full_key_t<KT>& key,
const value_input_t& value,
position_t& insert_pos,
match_stage_t& insert_stage,
node_offset_t& insert_size) {
assert(extent->is_mutable());
assert(state != nextent_state_t::READ_ONLY);
if (state == nextent_state_t::MUTATION_PENDING) {
recorder->template encode_insert<KT>(
key, value, insert_pos, insert_stage, insert_size);
}
#ifndef NDEBUG
test_extent->prepare_replay(extent);
test_recorder->template encode_insert<KT>(
key, value, insert_pos, insert_stage, insert_size);
#endif
auto ret = layout_t::template insert<KT>(
*mut, read(), key, value,
insert_pos, insert_stage, insert_size);
#ifndef NDEBUG
test_extent->replay_and_verify(extent);
#endif
return ret;
}
void split_replayable(StagedIterator& split_at) {
assert(extent->is_mutable());
assert(state != nextent_state_t::READ_ONLY);
if (state == nextent_state_t::MUTATION_PENDING) {
recorder->encode_split(split_at, read().p_start());
}
#ifndef NDEBUG
test_extent->prepare_replay(extent);
test_recorder->encode_split(split_at, read().p_start());
#endif
layout_t::split(*mut, read(), split_at);
#ifndef NDEBUG
test_extent->replay_and_verify(extent);
#endif
}
template <KeyT KT>
const value_t* split_insert_replayable(
StagedIterator& split_at,
const full_key_t<KT>& key,
const value_input_t& value,
position_t& insert_pos,
match_stage_t& insert_stage,
node_offset_t& insert_size) {
assert(extent->is_mutable());
assert(state != nextent_state_t::READ_ONLY);
if (state == nextent_state_t::MUTATION_PENDING) {
recorder->template encode_split_insert<KT>(
split_at, key, value, insert_pos, insert_stage, insert_size,
read().p_start());
}
#ifndef NDEBUG
test_extent->prepare_replay(extent);
test_recorder->template encode_split_insert<KT>(
split_at, key, value, insert_pos, insert_stage, insert_size,
read().p_start());
#endif
auto ret = layout_t::template split_insert<KT>(
*mut, read(), split_at, key, value,
insert_pos, insert_stage, insert_size);
#ifndef NDEBUG
test_extent->replay_and_verify(extent);
#endif
return ret;
}
void update_child_addr_replayable(
const laddr_t new_addr, laddr_packed_t* p_addr) {
assert(extent->is_mutable());
assert(state != nextent_state_t::READ_ONLY);
if (state == nextent_state_t::MUTATION_PENDING) {
recorder->encode_update_child_addr(
new_addr, p_addr, read().p_start(), get_length());
}
#ifndef NDEBUG
test_extent->prepare_replay(extent);
test_recorder->encode_update_child_addr(
new_addr, p_addr, read().p_start(), get_length());
#endif
layout_t::update_child_addr(*mut, new_addr, p_addr);
#ifndef NDEBUG
test_extent->replay_and_verify(extent);
#endif
}
std::tuple<match_stage_t, position_t> erase_replayable(const position_t& pos) {
assert(extent->is_mutable());
assert(state != nextent_state_t::READ_ONLY);
if (state == nextent_state_t::MUTATION_PENDING) {
recorder->encode_erase(pos);
}
#ifndef NDEBUG
test_extent->prepare_replay(extent);
test_recorder->encode_erase(pos);
#endif
auto ret = layout_t::erase(*mut, read(), pos);
#ifndef NDEBUG
test_extent->replay_and_verify(extent);
#endif
return ret;
}
position_t make_tail_replayable() {
assert(extent->is_mutable());
assert(state != nextent_state_t::READ_ONLY);
if (state == nextent_state_t::MUTATION_PENDING) {
recorder->encode_make_tail();
}
#ifndef NDEBUG
test_extent->prepare_replay(extent);
test_recorder->encode_make_tail();
#endif
auto ret = layout_t::make_tail(*mut, read());
#ifndef NDEBUG
test_extent->replay_and_verify(extent);
#endif
return ret;
}
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
prepare_mutate_value_payload(context_t c) {
prepare_mutate(c);
ValueDeltaRecorder* p_value_recorder = nullptr;
if (state == nextent_state_t::MUTATION_PENDING) {
p_value_recorder = recorder->get_value_recorder();
}
return {*mut, p_value_recorder};
}
void test_copy_to(NodeExtentMutable& to) const {
assert(extent->get_length() == to.get_length());
std::memcpy(to.get_write(), extent->get_read(), get_length());
}
eagain_ifuture<NodeExtentMutable> rebuild(context_t c, laddr_t hint) {
LOG_PREFIX(OTree::Extent::rebuild);
assert(!is_retired());
if (state == nextent_state_t::FRESH) {
assert(extent->is_initial_pending());
// already fresh and no need to record
return eagain_iertr::make_ready_future<NodeExtentMutable>(*mut);
}
assert(!extent->is_initial_pending());
auto alloc_size = get_length();
return c.nm.alloc_extent(c.t, hint, alloc_size
).handle_error_interruptible(
eagain_iertr::pass_further{},
crimson::ct_error::input_output_error::handle(
[FNAME, c, alloc_size, l_to_discard = extent->get_laddr()] {
SUBERRORT(seastore_onode,
"EIO during allocate -- node_size={}, to_discard={:x}",
c.t, alloc_size, l_to_discard);
ceph_abort("fatal error");
})
).si_then([this, c, FNAME] (auto fresh_extent) {
SUBDEBUGT(seastore_onode,
"update addr from {:#x} to {:#x} ...",
c.t, extent->get_laddr(), fresh_extent->get_laddr());
assert(fresh_extent);
assert(fresh_extent->is_initial_pending());
assert(fresh_extent->get_recorder() == nullptr);
assert(get_length() == fresh_extent->get_length());
auto fresh_mut = fresh_extent->get_mutable();
std::memcpy(fresh_mut.get_write(), extent->get_read(), get_length());
NodeExtentRef to_discard = extent;
extent = fresh_extent;
node_stage = node_stage_t(reinterpret_cast<const FieldType*>(extent->get_read()),
get_length());
state = nextent_state_t::FRESH;
mut.emplace(fresh_mut);
recorder = nullptr;
return c.nm.retire_extent(c.t, to_discard
).handle_error_interruptible(
eagain_iertr::pass_further{},
crimson::ct_error::input_output_error::handle(
[FNAME, c, l_to_discard = to_discard->get_laddr(),
l_fresh = fresh_extent->get_laddr()] {
SUBERRORT(seastore_onode,
"EIO during retire -- to_disgard={:x}, fresh={:x}",
c.t, l_to_discard, l_fresh);
ceph_abort("fatal error");
}),
crimson::ct_error::enoent::handle(
[FNAME, c, l_to_discard = to_discard->get_laddr(),
l_fresh = fresh_extent->get_laddr()] {
SUBERRORT(seastore_onode,
"ENOENT during retire -- to_disgard={:x}, fresh={:x}",
c.t, l_to_discard, l_fresh);
ceph_abort("fatal error");
})
);
}).si_then([this, c] {
boost::ignore_unused(c); // avoid clang warning;
assert(!c.t.is_conflicted());
return *mut;
});
}
eagain_ifuture<> retire(context_t c) {
LOG_PREFIX(OTree::Extent::retire);
assert(!is_retired());
auto addr = extent->get_laddr();
return c.nm.retire_extent(c.t, std::move(extent)
).handle_error_interruptible(
eagain_iertr::pass_further{},
crimson::ct_error::input_output_error::handle(
[FNAME, c, addr] {
SUBERRORT(seastore_onode, "EIO -- addr={:x}", c.t, addr);
ceph_abort("fatal error");
}),
crimson::ct_error::enoent::handle(
[FNAME, c, addr] {
SUBERRORT(seastore_onode, "ENOENT -- addr={:x}", c.t, addr);
ceph_abort("fatal error");
})
#ifndef NDEBUG
).si_then([c] {
assert(!c.t.is_conflicted());
}
#endif
);
}
private:
NodeExtentRef extent;
node_stage_t node_stage;
nextent_state_t state;
std::optional<NodeExtentMutable> mut;
// owned by extent
recorder_t* recorder;
#ifndef NDEBUG
// verify record replay using a different memory block
TestReplayExtent::Ref test_extent;
recorder_t* test_recorder;
#endif
};
}
| 21,818 | 34.191935 | 92 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/common/type_helpers.h"
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "fwd.h"
#include "node_extent_mutable.h"
#include "node_types.h"
#include "stages/node_stage_layout.h"
#include "super.h"
/**
* node_extent_manager.h
*
* Contains general interfaces for different backends (Dummy and Seastore).
*/
namespace crimson::os::seastore::onode {
using crimson::os::seastore::LogicalCachedExtent;
class NodeExtent : public LogicalCachedExtent {
public:
virtual ~NodeExtent() = default;
const node_header_t& get_header() const {
return *reinterpret_cast<const node_header_t*>(get_read());
}
const char* get_read() const {
return get_bptr().c_str();
}
NodeExtentMutable get_mutable() {
assert(is_mutable());
return do_get_mutable();
}
virtual DeltaRecorder* get_recorder() const = 0;
virtual NodeExtentRef mutate(context_t, DeltaRecorderURef&&) = 0;
protected:
template <typename... T>
NodeExtent(T&&... t) : LogicalCachedExtent(std::forward<T>(t)...) {}
NodeExtentMutable do_get_mutable() {
return NodeExtentMutable(get_bptr().c_str(), get_length());
}
std::ostream& print_detail_l(std::ostream& out) const final {
return out << ", fltree_header=" << get_header();
}
/**
* Abstracted interfaces to implement:
* - CacheExtent::duplicate_for_write() -> CachedExtentRef
* - CacheExtent::get_type() -> extent_types_t
* - CacheExtent::get_delta() -> ceph::bufferlist
* - LogicalCachedExtent::apply_delta(const ceph::bufferlist) -> void
*/
};
using crimson::os::seastore::TransactionManager;
class NodeExtentManager {
using base_iertr = TransactionManager::base_iertr;
public:
virtual ~NodeExtentManager() = default;
virtual bool is_read_isolated() const = 0;
using read_iertr = base_iertr::extend<
crimson::ct_error::invarg,
crimson::ct_error::enoent,
crimson::ct_error::erange>;
virtual read_iertr::future<NodeExtentRef> read_extent(
Transaction&, laddr_t) = 0;
using alloc_iertr = base_iertr;
virtual alloc_iertr::future<NodeExtentRef> alloc_extent(
Transaction&, laddr_t hint, extent_len_t) = 0;
using retire_iertr = base_iertr::extend<
crimson::ct_error::enoent>;
virtual retire_iertr::future<> retire_extent(
Transaction&, NodeExtentRef) = 0;
using getsuper_iertr = base_iertr;
virtual getsuper_iertr::future<Super::URef> get_super(
Transaction&, RootNodeTracker&) = 0;
virtual std::ostream& print(std::ostream& os) const = 0;
static NodeExtentManagerURef create_dummy(bool is_sync);
static NodeExtentManagerURef create_seastore(
TransactionManager &tm, laddr_t min_laddr = L_ADDR_MIN, double p_eagain = 0.0);
};
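// A minimal sketch of constructing a backend; the TransactionManager `tm` is
// assumed to be owned by the caller, as FLTreeOnodeManager does:
//
//   auto nm_test = NodeExtentManager::create_dummy(true /* is_sync */);
//   auto nm_prod = NodeExtentManager::create_seastore(tm);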
inline std::ostream& operator<<(std::ostream& os, const NodeExtentManager& nm) {
return nm.print(os);
}
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::onode::NodeExtent> : fmt::ostream_formatter {};
#endif
| 3,120 | 28.443396 | 104 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_extent_mutable.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include <cstring>
#include "fwd.h"
#pragma once
namespace crimson::os::seastore::onode {
/**
* NodeExtentMutable
*
* A thin wrapper of NodeExtent to make sure that only the newly allocated
* or the duplicated NodeExtent is mutable, and the memory modifications are
* safe within the extent range.
*/
class NodeExtentMutable {
public:
void copy_in_absolute(void* dst, const void* src, extent_len_t len) {
assert(is_safe(dst, len));
std::memcpy(dst, src, len);
}
template <typename T>
void copy_in_absolute(void* dst, const T& src) {
copy_in_absolute(dst, &src, sizeof(T));
}
const void* copy_in_relative(
extent_len_t dst_offset, const void* src, extent_len_t len) {
auto dst = get_write() + dst_offset;
copy_in_absolute(dst, src, len);
return dst;
}
template <typename T>
const T* copy_in_relative(
extent_len_t dst_offset, const T& src) {
auto dst = copy_in_relative(dst_offset, &src, sizeof(T));
return static_cast<const T*>(dst);
}
void shift_absolute(const void* src, extent_len_t len, int offset) {
assert(is_safe(src, len));
char* to = (char*)src + offset;
assert(is_safe(to, len));
if (len != 0) {
std::memmove(to, src, len);
}
}
void shift_relative(extent_len_t src_offset, extent_len_t len, int offset) {
shift_absolute(get_write() + src_offset, len, offset);
}
void set_absolute(void* dst, int value, extent_len_t len) {
assert(is_safe(dst, len));
std::memset(dst, value, len);
}
void set_relative(extent_len_t dst_offset, int value, extent_len_t len) {
auto dst = get_write() + dst_offset;
set_absolute(dst, value, len);
}
template <typename T>
void validate_inplace_update(const T& updated) {
assert(is_safe(&updated, sizeof(T)));
}
const char* get_read() const { return p_start; }
char* get_write() { return p_start; }
extent_len_t get_length() const {
#ifndef NDEBUG
if (node_offset == 0) {
assert(is_valid_node_size(length));
}
#endif
return length;
}
node_offset_t get_node_offset() const { return node_offset; }
NodeExtentMutable get_mutable_absolute(const void* dst, node_offset_t len) const {
assert(node_offset == 0);
assert(is_safe(dst, len));
assert((const char*)dst != get_read());
auto ret = *this;
node_offset_t offset = (const char*)dst - get_read();
assert(offset != 0);
ret.p_start += offset;
ret.length = len;
ret.node_offset = offset;
return ret;
}
NodeExtentMutable get_mutable_relative(
node_offset_t offset, node_offset_t len) const {
return get_mutable_absolute(get_read() + offset, len);
}
private:
NodeExtentMutable(char* p_start, extent_len_t length)
: p_start{p_start}, length{length} {}
bool is_safe(const void* src, extent_len_t len) const {
return ((const char*)src >= p_start) &&
((const char*)src + len <= p_start + length);
}
char* p_start;
extent_len_t length;
node_offset_t node_offset = 0;
friend class NodeExtent;
};
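// A minimal usage sketch; a NodeExtentMutable can only be obtained from a
// mutable NodeExtent (e.g. via NodeExtent::get_mutable()), so the extent
// below is assumed to be freshly allocated or duplicated for write:
//
//   NodeExtentMutable mut = extent->get_mutable();
//   node_header_t header;
//   // ... fill header ...
//   mut.copy_in_relative(0, header); // bounds-checked copy at offset 0
//   mut.set_relative(sizeof(header), 0, 8); // zero 8 bytes after the header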
}
| 3,146 | 26.605263 | 84 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_impl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <ostream>
#include "node_extent_mutable.h"
#include "node_types.h"
#include "stages/stage_types.h"
namespace crimson::os::seastore::onode {
#ifdef UNIT_TESTS_BUILT
enum class InsertType { BEGIN, LAST, MID };
struct split_expectation_t {
match_stage_t split_stage;
match_stage_t insert_stage;
bool is_insert_left;
InsertType insert_type;
};
struct last_split_info_t {
search_position_t split_pos;
match_stage_t insert_stage;
bool is_insert_left;
InsertType insert_type;
bool match(const split_expectation_t& e) const {
match_stage_t split_stage;
if (split_pos.nxt.nxt.index == 0) {
if (split_pos.nxt.index == 0) {
split_stage = 2;
} else {
split_stage = 1;
}
} else {
split_stage = 0;
}
return split_stage == e.split_stage &&
insert_stage == e.insert_stage &&
is_insert_left == e.is_insert_left &&
insert_type == e.insert_type;
}
bool match_split_pos(const search_position_t& pos) const {
return split_pos == pos;
}
};
extern last_split_info_t last_split;
#endif
struct key_hobj_t;
struct key_view_t;
class NodeExtentMutable;
/**
* NodeImpl
*
* Hides type specific node layout implementations for Node.
*/
class NodeImpl {
public:
virtual ~NodeImpl() = default;
virtual node_type_t node_type() const = 0;
virtual field_type_t field_type() const = 0;
virtual laddr_t laddr() const = 0;
virtual const char* read() const = 0;
virtual extent_len_t get_node_size() const = 0;
virtual nextent_state_t get_extent_state() const = 0;
virtual void prepare_mutate(context_t) = 0;
virtual bool is_level_tail() const = 0;
/* Invariants for num_keys and num_values:
* - for leaf node and non-tail internal node, num_keys == num_values;
* - for tail internal node, num_keys + 1 == num_values;
   * - all nodes must have at least 1 value, except the root leaf node;
   * - the root internal node must have more than 1 value;
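   * e.g. (illustration): a tail internal node holding 2 keys stores 3 child
   *   addresses, the extra one being the tail value (see
   *   InternalNodeImpl::get_tail_value() below);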
*/
virtual void validate_non_empty() const = 0;
virtual bool is_keys_empty() const = 0;
// under the assumption that node is not empty
virtual bool has_single_value() const = 0;
virtual level_t level() const = 0;
virtual node_offset_t free_size() const = 0;
virtual extent_len_t total_size() const = 0;
virtual bool is_extent_retired() const = 0;
virtual std::optional<key_view_t> get_pivot_index() const = 0;
virtual bool is_size_underflow() const = 0;
virtual std::tuple<match_stage_t, search_position_t> erase(const search_position_t&) = 0;
virtual std::tuple<match_stage_t, std::size_t> evaluate_merge(NodeImpl&) = 0;
virtual search_position_t merge(NodeExtentMutable&, NodeImpl&, match_stage_t, extent_len_t) = 0;
virtual eagain_ifuture<NodeExtentMutable> rebuild_extent(context_t) = 0;
virtual eagain_ifuture<> retire_extent(context_t) = 0;
virtual search_position_t make_tail() = 0;
virtual node_stats_t get_stats() const = 0;
virtual std::ostream& dump(std::ostream&) const = 0;
virtual std::ostream& dump_brief(std::ostream&) const = 0;
virtual const std::string& get_name() const = 0;
virtual void validate_layout() const = 0;
virtual void test_copy_to(NodeExtentMutable&) const = 0;
virtual void test_set_tail(NodeExtentMutable&) = 0;
protected:
NodeImpl() = default;
};
/**
* InternalNodeImpl
*
* Hides type specific node layout implementations for InternalNode.
*/
class InternalNodeImpl : public NodeImpl {
public:
struct internal_marker_t {};
virtual ~InternalNodeImpl() = default;
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual void get_slot(const search_position_t&, // IN
key_view_t* = nullptr, // OUT
const laddr_packed_t** = nullptr) const { // OUT
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual void get_prev_slot(search_position_t&, // IN&OUT
key_view_t* = nullptr, // OUT
const laddr_packed_t** = nullptr) const { // OUT
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual void get_next_slot(search_position_t&, // IN&OUT
key_view_t* = nullptr, // OUT
const laddr_packed_t** = nullptr) const { // OUT
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual void get_largest_slot(search_position_t* = nullptr, // OUT
key_view_t* = nullptr, // OUT
const laddr_packed_t** = nullptr) const { // OUT
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual lookup_result_t<node_type_t::INTERNAL> lower_bound(
const key_hobj_t&, MatchHistory&,
key_view_t* = nullptr, internal_marker_t = {}) const {
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual const laddr_packed_t* insert(
const key_view_t&, const laddr_t&, search_position_t&, match_stage_t&, node_offset_t&) {
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual std::tuple<search_position_t, bool, const laddr_packed_t*> split_insert(
NodeExtentMutable&, NodeImpl&, const key_view_t&, const laddr_t&,
search_position_t&, match_stage_t&, node_offset_t&) {
ceph_abort("impossible path");
}
virtual const laddr_packed_t* get_tail_value() const = 0;
virtual void replace_child_addr(const search_position_t&, laddr_t dst, laddr_t src) = 0;
virtual std::tuple<match_stage_t, node_offset_t> evaluate_insert(
const key_view_t&, const laddr_t&, search_position_t&) const = 0;
struct fresh_impl_t {
InternalNodeImplURef impl;
NodeExtentMutable mut;
std::pair<NodeImplURef, NodeExtentMutable> make_pair() {
return {std::move(impl), mut};
}
};
static eagain_ifuture<fresh_impl_t> allocate(context_t, laddr_t, field_type_t, bool, level_t);
static InternalNodeImplURef load(NodeExtentRef, field_type_t);
protected:
InternalNodeImpl() = default;
};
/**
* LeafNodeImpl
*
* Hides type specific node layout implementations for LeafNode.
*/
class LeafNodeImpl : public NodeImpl {
public:
struct leaf_marker_t {};
virtual ~LeafNodeImpl() = default;
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual void get_slot(const search_position_t&, // IN
key_view_t* = nullptr, // OUT
const value_header_t** = nullptr) const { // OUT
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual void get_prev_slot(search_position_t&, // IN&OUT
key_view_t* = nullptr, // OUT
const value_header_t** = nullptr) const { // OUT
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual void get_next_slot(search_position_t&, // IN&OUT
key_view_t* = nullptr, // OUT
const value_header_t** = nullptr) const { // OUT
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual void get_largest_slot(search_position_t* = nullptr, // OUT
key_view_t* = nullptr, // OUT
const value_header_t** = nullptr) const { // OUT
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual lookup_result_t<node_type_t::LEAF> lower_bound(
const key_hobj_t&, MatchHistory&,
key_view_t* = nullptr, leaf_marker_t = {}) const {
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual const value_header_t* insert(
const key_hobj_t&, const value_config_t&, search_position_t&, match_stage_t&, node_offset_t&) {
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual std::tuple<search_position_t, bool, const value_header_t*> split_insert(
NodeExtentMutable&, NodeImpl&, const key_hobj_t&, const value_config_t&,
search_position_t&, match_stage_t&, node_offset_t&) {
ceph_abort("impossible path");
}
virtual std::tuple<match_stage_t, node_offset_t> evaluate_insert(
const key_hobj_t&, const value_config_t&,
const MatchHistory&, match_stat_t, search_position_t&) const = 0;
virtual std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
prepare_mutate_value_payload(context_t) = 0;
struct fresh_impl_t {
LeafNodeImplURef impl;
NodeExtentMutable mut;
std::pair<NodeImplURef, NodeExtentMutable> make_pair() {
return {std::move(impl), mut};
}
};
static eagain_ifuture<fresh_impl_t> allocate(context_t, laddr_t, field_type_t, bool);
static LeafNodeImplURef load(NodeExtentRef, field_type_t);
protected:
LeafNodeImpl() = default;
};
}
| 9,486 | 34.00738 | 101 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_layout_replayable.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "node_extent_mutable.h"
#include "stages/node_stage.h"
#include "stages/stage.h"
namespace crimson::os::seastore::onode {
/**
* NodeLayoutReplayableT
*
 * Contains templated logic to modify the layout of a NodeExtent in a way
 * that is also replayable. Used by NodeExtentAccessorT at runtime and by
 * DeltaRecorderT during replay.
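 *
 * Flow sketch (conceptual, parameters elided): at runtime the extent accessor
 * both records the operation and applies it, e.g.
 *
 *   NodeLayoutReplayableT<FieldType, NODE_TYPE>::split(mut, node_stage, split_at);
 *
 * while during delta replay DeltaRecorderT decodes the recorded parameters and
 * invokes the same static function, so both paths apply the identical change
 * to the extent.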
*/
template <typename FieldType, node_type_t NODE_TYPE>
struct NodeLayoutReplayableT {
using node_stage_t = node_extent_t<FieldType, NODE_TYPE>;
using stage_t = node_to_stage_t<node_stage_t>;
using position_t = typename stage_t::position_t;
using StagedIterator = typename stage_t::StagedIterator;
using value_input_t = value_input_type_t<NODE_TYPE>;
using value_t = value_type_t<NODE_TYPE>;
static constexpr auto FIELD_TYPE = FieldType::FIELD_TYPE;
template <KeyT KT>
static const value_t* insert(
NodeExtentMutable& mut,
const node_stage_t& node_stage,
const full_key_t<KT>& key,
const value_input_t& value,
position_t& insert_pos,
match_stage_t& insert_stage,
node_offset_t& insert_size) {
auto p_value = stage_t::template proceed_insert<KT, false>(
mut, node_stage, key, value, insert_pos, insert_stage, insert_size);
return p_value;
}
static void split(
NodeExtentMutable& mut,
const node_stage_t& node_stage,
StagedIterator& split_at) {
node_stage_t::update_is_level_tail(mut, node_stage, false);
stage_t::trim(mut, split_at);
}
template <KeyT KT>
static const value_t* split_insert(
NodeExtentMutable& mut,
const node_stage_t& node_stage,
StagedIterator& split_at,
const full_key_t<KT>& key,
const value_input_t& value,
position_t& insert_pos,
match_stage_t& insert_stage,
node_offset_t& insert_size) {
node_stage_t::update_is_level_tail(mut, node_stage, false);
stage_t::trim(mut, split_at);
auto p_value = stage_t::template proceed_insert<KT, true>(
mut, node_stage, key, value, insert_pos, insert_stage, insert_size);
return p_value;
}
static void update_child_addr(
NodeExtentMutable& mut, const laddr_t new_addr, laddr_packed_t* p_addr) {
assert(NODE_TYPE == node_type_t::INTERNAL);
mut.copy_in_absolute(p_addr, new_addr);
}
static std::tuple<match_stage_t, position_t> erase(
NodeExtentMutable& mut,
const node_stage_t& node_stage,
const position_t& _erase_pos) {
if (_erase_pos.is_end()) {
// must be internal node
assert(node_stage.is_level_tail());
// return erase_stage, last_pos
return update_last_to_tail(mut, node_stage);
}
assert(node_stage.keys() != 0);
position_t erase_pos = _erase_pos;
auto erase_stage = stage_t::erase(mut, node_stage, erase_pos);
// return erase_stage, next_pos
return {erase_stage, erase_pos};
}
static position_t make_tail(
NodeExtentMutable& mut,
const node_stage_t& node_stage) {
assert(!node_stage.is_level_tail());
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
auto [r_stage, r_last_pos] = update_last_to_tail(mut, node_stage);
std::ignore = r_stage;
return r_last_pos;
} else {
node_stage_t::update_is_level_tail(mut, node_stage, true);
// no need to calculate the last pos
return position_t::end();
}
}
private:
static std::tuple<match_stage_t, position_t> update_last_to_tail(
NodeExtentMutable& mut,
const node_stage_t& node_stage) {
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
assert(node_stage.keys() != 0);
position_t last_pos;
laddr_t last_value;
{
const laddr_packed_t* p_last_value;
stage_t::template get_largest_slot<true, false, true>(
node_stage, &last_pos, nullptr, &p_last_value);
last_value = p_last_value->value;
}
auto erase_pos = last_pos;
auto erase_stage = stage_t::erase(mut, node_stage, erase_pos);
assert(erase_pos.is_end());
node_stage_t::update_is_level_tail(mut, node_stage, true);
auto p_last_value = const_cast<laddr_packed_t*>(
node_stage.get_end_p_laddr());
mut.copy_in_absolute(p_last_value, last_value);
// return erase_stage, last_pos
return {erase_stage, last_pos};
} else {
ceph_abort("impossible path");
}
}
};
}
| 4,486 | 31.280576 | 79 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cassert>
#include <ostream>
#include "fwd.h"
namespace crimson::os::seastore::onode {
constexpr uint8_t FIELD_TYPE_MAGIC = 0x25;
enum class field_type_t : uint8_t {
N0 = FIELD_TYPE_MAGIC,
N1,
N2,
N3,
_MAX
};
inline uint8_t to_unsigned(field_type_t type) {
auto value = static_cast<uint8_t>(type);
assert(value >= FIELD_TYPE_MAGIC);
assert(value < static_cast<uint8_t>(field_type_t::_MAX));
return value - FIELD_TYPE_MAGIC;
}
inline std::ostream& operator<<(std::ostream &os, field_type_t type) {
const char* const names[] = {"0", "1", "2", "3"};
auto index = to_unsigned(type);
os << names[index];
return os;
}
enum class node_type_t : uint8_t {
LEAF = 0,
INTERNAL
};
inline std::ostream& operator<<(std::ostream &os, const node_type_t& type) {
const char* const names[] = {"L", "I"};
auto index = static_cast<uint8_t>(type);
assert(index <= 1u);
os << names[index];
return os;
}
struct laddr_packed_t {
laddr_t value;
} __attribute__((packed));
inline std::ostream& operator<<(std::ostream& os, const laddr_packed_t& laddr) {
return os << "laddr_packed(0x" << std::hex << laddr.value << std::dec << ")";
}
using match_stat_t = int8_t;
constexpr match_stat_t MSTAT_END = -2; // index is search_position_t::end()
constexpr match_stat_t MSTAT_EQ = -1; // key == index
constexpr match_stat_t MSTAT_LT0 = 0; // key == index [pool/shard crush ns/oid]; key < index [snap/gen]
constexpr match_stat_t MSTAT_LT1 = 1; // key == index [pool/shard crush]; key < index [ns/oid]
constexpr match_stat_t MSTAT_LT2 = 2; // key < index [pool/shard crush ns/oid] ||
// key == index [pool/shard]; key < index [crush]
constexpr match_stat_t MSTAT_LT3 = 3; // key < index [pool/shard]
constexpr match_stat_t MSTAT_MIN = MSTAT_END;
constexpr match_stat_t MSTAT_MAX = MSTAT_LT3;
enum class node_delta_op_t : uint8_t {
INSERT,
SPLIT,
SPLIT_INSERT,
UPDATE_CHILD_ADDR,
ERASE,
MAKE_TAIL,
SUBOP_UPDATE_VALUE = 0xff,
};
/** nextent_state_t
*
* The possible states of tree node extent(NodeExtentAccessorT).
*
 * A state transition implies changes to the following capabilities:
 * - whether the extent is mutable;
 * - whether mutations need to be recorded;
 * - whether the memory has been copied;
*
* load()----+
* |
* alloc() v
* | +--> [READ_ONLY] ---------+
* | | | |
* | | prepare_mutate() |
* | | | |
* | v v v
* | +--> [MUTATION_PENDING]---+
* | | |
* | | rebuild()
* | | |
* | v v
* +------->+--> [FRESH] <------------+
*
 * Note that NodeExtentAccessorT might still be MUTATION_PENDING/FRESH while
 * the internal extent has already become DIRTY after the transaction
 * submission has started, but node destruction and validation have not been
 * completed yet.
*/
enum class nextent_state_t : uint8_t {
READ_ONLY = 0, // requires mutate for recording
// CLEAN/DIRTY
MUTATION_PENDING, // can mutate, needs recording
// MUTATION_PENDING
FRESH, // can mutate, no recording
// INITIAL_WRITE_PENDING
};
}
template <> struct fmt::formatter<crimson::os::seastore::onode::node_delta_op_t>
: fmt::formatter<std::string_view> {
using node_delta_op_t = crimson::os::seastore::onode::node_delta_op_t;
// parse is inherited from formatter<string_view>.
template <typename FormatContext>
auto format(node_delta_op_t op, FormatContext& ctx) {
std::string_view name = "unknown";
switch (op) {
case node_delta_op_t::INSERT:
name = "insert";
break;
case node_delta_op_t::SPLIT:
name = "split";
break;
case node_delta_op_t::SPLIT_INSERT:
name = "split_insert";
break;
case node_delta_op_t::UPDATE_CHILD_ADDR:
name = "update_child_addr";
break;
case node_delta_op_t::ERASE:
name = "erase";
break;
case node_delta_op_t::MAKE_TAIL:
name = "make_tail";
break;
case node_delta_op_t::SUBOP_UPDATE_VALUE:
name = "subop_update_value";
break;
}
return formatter<string_view>::format(name, ctx);
}
};
| 4,460 | 29.554795 | 104 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/super.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <memory>
#include "crimson/common/type_helpers.h"
#include "fwd.h"
namespace crimson::os::seastore::onode {
class Node;
class Super;
/**
* RootNodeTracker
*
* An abstracted tracker to get the root node by Transaction.
*/
class RootNodeTracker {
public:
virtual ~RootNodeTracker() = default;
virtual bool is_clean() const = 0;
virtual Ref<Node> get_root(Transaction&) const = 0;
static RootNodeTrackerURef create(bool read_isolated);
protected:
RootNodeTracker() = default;
RootNodeTracker(const RootNodeTracker&) = delete;
RootNodeTracker(RootNodeTracker&&) = delete;
RootNodeTracker& operator=(const RootNodeTracker&) = delete;
RootNodeTracker& operator=(RootNodeTracker&&) = delete;
virtual void do_track_super(Transaction&, Super&) = 0;
virtual void do_untrack_super(Transaction&, Super&) = 0;
friend class Super;
};
/**
* Super
*
 * The parent of the root node. It tracks the relationship between a
 * Transaction and the root node address.
*/
class Super {
public:
using URef = std::unique_ptr<Super>;
Super(const Super&) = delete;
Super(Super&&) = delete;
Super& operator=(const Super&) = delete;
Super& operator=(Super&&) = delete;
virtual ~Super() {
assert(tracked_root_node == nullptr);
tracker.do_untrack_super(t, *this);
}
virtual laddr_t get_root_laddr() const = 0;
virtual void write_root_laddr(context_t, laddr_t) = 0;
void do_track_root(Node& root) {
assert(tracked_root_node == nullptr);
tracked_root_node = &root;
}
void do_untrack_root(Node& root) {
assert(tracked_root_node == &root);
tracked_root_node = nullptr;
}
Node* get_p_root() const {
assert(tracked_root_node != nullptr);
return tracked_root_node;
}
protected:
Super(Transaction& t, RootNodeTracker& tracker)
: t{t}, tracker{tracker} {
tracker.do_track_super(t, *this);
}
private:
Transaction& t;
RootNodeTracker& tracker;
Node* tracked_root_node = nullptr;
};
/**
* RootNodeTrackerIsolated
*
* A concrete RootNodeTracker implementation which provides root node isolation
 * between Transactions for the Seastore backend.
*/
class RootNodeTrackerIsolated final : public RootNodeTracker {
public:
~RootNodeTrackerIsolated() override { assert(is_clean()); }
protected:
bool is_clean() const override {
return tracked_supers.empty();
}
void do_track_super(Transaction& t, Super& super) override {
assert(tracked_supers.find(&t) == tracked_supers.end());
tracked_supers[&t] = &super;
}
void do_untrack_super(Transaction& t, Super& super) override {
[[maybe_unused]] auto removed = tracked_supers.erase(&t);
assert(removed);
}
::Ref<Node> get_root(Transaction& t) const override;
std::map<Transaction*, Super*> tracked_supers;
};
/**
* RootNodeTrackerShared
*
* A concrete RootNodeTracker implementation which has no isolation between
 * Transactions for the Dummy backend.
*/
class RootNodeTrackerShared final : public RootNodeTracker {
public:
~RootNodeTrackerShared() override { assert(is_clean()); }
protected:
bool is_clean() const override {
return tracked_super == nullptr;
}
void do_track_super(Transaction&, Super& super) override {
assert(is_clean());
tracked_super = &super;
}
void do_untrack_super(Transaction&, Super& super) override {
assert(tracked_super == &super);
tracked_super = nullptr;
}
::Ref<Node> get_root(Transaction&) const override;
Super* tracked_super = nullptr;
};
inline RootNodeTrackerURef RootNodeTracker::create(bool read_isolated) {
if (read_isolated) {
return RootNodeTrackerURef(new RootNodeTrackerIsolated());
} else {
return RootNodeTrackerURef(new RootNodeTrackerShared());
}
}
}
| 3,839 | 25.666667 | 79 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/tree.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <ostream>
#include "common/hobject.h"
#include "crimson/common/type_helpers.h"
#include "crimson/os/seastore/logging.h"
#include "fwd.h"
#include "node.h"
#include "node_extent_manager.h"
#include "stages/key_layout.h"
#include "super.h"
#include "value.h"
/**
* tree.h
*
* A special-purpose and b-tree-based implementation that:
* - Fulfills requirements of OnodeManager to index ordered onode key-values;
* - Runs above seastore block and transaction layer;
 * - Is specially optimized for onode key structures and seastore
* delta/transaction semantics;
*
 * Note: Cursor/Value are transactional; they cannot be used outside the scope
 * of the corresponding transaction, or the behavior is undefined.
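 *
 * Usage sketch (illustrative only; the concrete ValueImpl, the transaction
 * plumbing and all error handling are assumed and elided):
 *
 *   Btree<ValueImpl> tree{std::move(nm)};
 *   // tree.mkfs(t) is required once on a fresh store
 *   tree.insert(t, obj, {payload_size}
 *   ).si_then([](auto ret) {
 *     auto& [cursor, success] = ret;
 *     auto value = cursor.value();  // only valid within this transaction
 *   });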
*/
namespace crimson::os::seastore::onode {
class Node;
class tree_cursor_t;
template <typename ValueImpl>
class Btree {
public:
Btree(NodeExtentManagerURef&& _nm)
: nm{std::move(_nm)},
root_tracker{RootNodeTracker::create(nm->is_read_isolated())} {}
~Btree() { assert(root_tracker->is_clean()); }
Btree(const Btree&) = delete;
Btree(Btree&&) = delete;
Btree& operator=(const Btree&) = delete;
Btree& operator=(Btree&&) = delete;
eagain_ifuture<> mkfs(Transaction& t) {
return Node::mkfs(get_context(t), *root_tracker);
}
class Cursor {
public:
Cursor(const Cursor&) = default;
Cursor(Cursor&&) noexcept = default;
Cursor& operator=(const Cursor&) = default;
Cursor& operator=(Cursor&&) = default;
~Cursor() = default;
bool is_end() const {
if (p_cursor->is_tracked()) {
return false;
} else if (p_cursor->is_invalid()) {
return true;
} else {
// we don't actually store end cursor because it will hold a reference
// to an end leaf node and is not kept updated.
assert(p_cursor->is_end());
ceph_abort("impossible");
}
}
/// Invalidate the Cursor before submitting transaction.
void invalidate() {
p_cursor.reset();
}
    // XXX: return key_view_t to avoid unnecessary ghobject_t constructions
ghobject_t get_ghobj() const {
assert(!is_end());
auto view = p_cursor->get_key_view(
p_tree->value_builder.get_header_magic());
assert(view.nspace().size() <=
p_tree->value_builder.get_max_ns_size());
assert(view.oid().size() <=
p_tree->value_builder.get_max_oid_size());
return view.to_ghobj();
}
ValueImpl value() {
assert(!is_end());
return p_tree->value_builder.build_value(
*p_tree->nm, p_tree->value_builder, p_cursor);
}
bool operator==(const Cursor& o) const { return operator<=>(o) == 0; }
eagain_ifuture<Cursor> get_next(Transaction& t) {
assert(!is_end());
auto this_obj = *this;
return p_cursor->get_next(p_tree->get_context(t)
).si_then([this_obj] (Ref<tree_cursor_t> next_cursor) {
next_cursor->assert_next_to(
*this_obj.p_cursor, this_obj.p_tree->value_builder.get_header_magic());
auto ret = Cursor{this_obj.p_tree, next_cursor};
assert(this_obj < ret);
return ret;
});
}
template <bool FORCE_MERGE = false>
eagain_ifuture<Cursor> erase(Transaction& t) {
assert(!is_end());
auto this_obj = *this;
return p_cursor->erase<FORCE_MERGE>(p_tree->get_context(t), true
).si_then([this_obj, this] (Ref<tree_cursor_t> next_cursor) {
assert(p_cursor->is_invalid());
if (next_cursor) {
assert(!next_cursor->is_end());
return Cursor{p_tree, next_cursor};
} else {
return Cursor{p_tree};
}
});
}
private:
Cursor(Btree* p_tree, Ref<tree_cursor_t> _p_cursor) : p_tree(p_tree) {
if (_p_cursor->is_invalid()) {
// we don't create Cursor from an invalid tree_cursor_t.
ceph_abort("impossible");
} else if (_p_cursor->is_end()) {
// we don't actually store end cursor because it will hold a reference
// to an end leaf node and is not kept updated.
} else {
assert(_p_cursor->is_tracked());
p_cursor = _p_cursor;
}
}
Cursor(Btree* p_tree) : p_tree{p_tree} {}
std::strong_ordering operator<=>(const Cursor& o) const {
assert(p_tree == o.p_tree);
return p_cursor->compare_to(
*o.p_cursor, p_tree->value_builder.get_header_magic());
}
static Cursor make_end(Btree* p_tree) {
return {p_tree};
}
Btree* p_tree;
Ref<tree_cursor_t> p_cursor = tree_cursor_t::get_invalid();
friend class Btree;
};
/*
* lookup
*/
eagain_ifuture<Cursor> begin(Transaction& t) {
return get_root(t).si_then([this, &t](auto root) {
return root->lookup_smallest(get_context(t));
}).si_then([this](auto cursor) {
return Cursor{this, cursor};
});
}
eagain_ifuture<Cursor> last(Transaction& t) {
return get_root(t).si_then([this, &t](auto root) {
return root->lookup_largest(get_context(t));
}).si_then([this](auto cursor) {
return Cursor(this, cursor);
});
}
Cursor end() {
return Cursor::make_end(this);
}
eagain_ifuture<bool> contains(Transaction& t, const ghobject_t& obj) {
return seastar::do_with(
key_hobj_t{obj},
[this, &t](auto& key) -> eagain_ifuture<bool> {
return get_root(t).si_then([this, &t, &key](auto root) {
// TODO: improve lower_bound()
return root->lower_bound(get_context(t), key);
}).si_then([](auto result) {
return MatchKindBS::EQ == result.match();
});
}
);
}
eagain_ifuture<Cursor> find(Transaction& t, const ghobject_t& obj) {
return seastar::do_with(
key_hobj_t{obj},
[this, &t](auto& key) -> eagain_ifuture<Cursor> {
return get_root(t).si_then([this, &t, &key](auto root) {
// TODO: improve lower_bound()
return root->lower_bound(get_context(t), key);
}).si_then([this](auto result) {
if (result.match() == MatchKindBS::EQ) {
return Cursor(this, result.p_cursor);
} else {
return Cursor::make_end(this);
}
});
}
);
}
/**
* lower_bound
*
* Returns a Cursor pointing to the element that is equal to the key, or the
* first element larger than the key, or the end Cursor if that element
* doesn't exist.
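   *
   * e.g. (sketch): if only keys k1 < k3 exist and k1 < k2 < k3 < k4, then
   * lower_bound(t, k2) and lower_bound(t, k3) both point to k3, while
   * lower_bound(t, k4) returns the end Cursor.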
*/
eagain_ifuture<Cursor> lower_bound(Transaction& t, const ghobject_t& obj) {
return seastar::do_with(
key_hobj_t{obj},
[this, &t](auto& key) -> eagain_ifuture<Cursor> {
return get_root(t).si_then([this, &t, &key](auto root) {
return root->lower_bound(get_context(t), key);
}).si_then([this](auto result) {
return Cursor(this, result.p_cursor);
});
}
);
}
eagain_ifuture<Cursor> get_next(Transaction& t, Cursor& cursor) {
return cursor.get_next(t);
}
/*
* modifiers
*/
struct tree_value_config_t {
value_size_t payload_size = 256;
};
using insert_iertr = eagain_iertr::extend<
crimson::ct_error::value_too_large>;
insert_iertr::future<std::pair<Cursor, bool>>
insert(Transaction& t, const ghobject_t& obj, tree_value_config_t _vconf) {
LOG_PREFIX(OTree::insert);
if (_vconf.payload_size > value_builder.get_max_value_payload_size()) {
SUBERRORT(seastore_onode, "value payload size {} too large to insert {}",
t, _vconf.payload_size, key_hobj_t{obj});
return crimson::ct_error::value_too_large::make();
}
if (obj.hobj.nspace.size() > value_builder.get_max_ns_size()) {
SUBERRORT(seastore_onode, "namespace size {} too large to insert {}",
t, obj.hobj.nspace.size(), key_hobj_t{obj});
return crimson::ct_error::value_too_large::make();
}
if (obj.hobj.oid.name.size() > value_builder.get_max_oid_size()) {
SUBERRORT(seastore_onode, "oid size {} too large to insert {}",
t, obj.hobj.oid.name.size(), key_hobj_t{obj});
return crimson::ct_error::value_too_large::make();
}
value_config_t vconf{value_builder.get_header_magic(), _vconf.payload_size};
return seastar::do_with(
key_hobj_t{obj},
[this, &t, vconf](auto& key) -> eagain_ifuture<std::pair<Cursor, bool>> {
ceph_assert(key.is_valid());
return get_root(t).si_then([this, &t, &key, vconf](auto root) {
return root->insert(get_context(t), key, vconf, std::move(root));
}).si_then([this](auto ret) {
auto& [cursor, success] = ret;
return std::make_pair(Cursor(this, cursor), success);
});
}
);
}
eagain_ifuture<std::size_t> erase(Transaction& t, const ghobject_t& obj) {
return seastar::do_with(
key_hobj_t{obj},
[this, &t](auto& key) -> eagain_ifuture<std::size_t> {
return get_root(t).si_then([this, &t, &key](auto root) {
return root->erase(get_context(t), key, std::move(root));
});
}
);
}
eagain_ifuture<Cursor> erase(Transaction& t, Cursor& pos) {
return pos.erase(t);
}
eagain_ifuture<> erase(Transaction& t, Value& value) {
assert(value.is_tracked());
auto ref_cursor = value.p_cursor;
return ref_cursor->erase(get_context(t), false
).si_then([ref_cursor] (auto next_cursor) {
assert(ref_cursor->is_invalid());
assert(!next_cursor);
});
}
/*
* stats
*/
eagain_ifuture<size_t> height(Transaction& t) {
return get_root(t).si_then([](auto root) {
return size_t(root->level() + 1);
});
}
eagain_ifuture<tree_stats_t> get_stats_slow(Transaction& t) {
return get_root(t).si_then([this, &t](auto root) {
unsigned height = root->level() + 1;
return root->get_tree_stats(get_context(t)
).si_then([height](auto stats) {
stats.height = height;
return seastar::make_ready_future<tree_stats_t>(stats);
});
});
}
std::ostream& dump(Transaction& t, std::ostream& os) {
auto root = root_tracker->get_root(t);
if (root) {
root->dump(os);
} else {
os << "empty tree!";
}
return os;
}
std::ostream& print(std::ostream& os) const {
return os << "BTree-" << *nm;
}
/*
* test_only
*/
bool test_is_clean() const {
return root_tracker->is_clean();
}
eagain_ifuture<> test_clone_from(
Transaction& t, Transaction& t_from, Btree& from) {
// Note: assume the tree to clone is tracked correctly in memory.
    // In some unit tests, parts of the tree are stubbed out, so they
    // should not be loaded from NodeExtentManager.
return from.get_root(t_from
).si_then([this, &t](auto root_from) {
return root_from->test_clone_root(get_context(t), *root_tracker);
});
}
private:
context_t get_context(Transaction& t) {
return {*nm, value_builder, t};
}
eagain_ifuture<Ref<Node>> get_root(Transaction& t) {
auto root = root_tracker->get_root(t);
if (root) {
return seastar::make_ready_future<Ref<Node>>(root);
} else {
return Node::load_root(get_context(t), *root_tracker);
}
}
NodeExtentManagerURef nm;
const ValueBuilderImpl<ValueImpl> value_builder;
RootNodeTrackerURef root_tracker;
friend class DummyChildPool;
};
template <typename ValueImpl>
inline std::ostream& operator<<(std::ostream& os, const Btree<ValueImpl>& tree) {
return tree.print(os);
}
}
| 11,585 | 28.860825 | 83 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/tree_utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cassert>
#include <cstring>
#include <random>
#include <string>
#include <sstream>
#include <utility>
#include <vector>
#include <seastar/core/thread.hh>
#include "crimson/common/log.h"
#include "stages/key_layout.h"
#include "tree.h"
/**
* tree_utils.h
*
 * Contains shared logic for unit tests and the perf tool.
*/
namespace crimson::os::seastore::onode {
/**
* templates to work with tree utility classes:
*
* struct ValueItem {
* <public members>
*
* value_size_t get_payload_size() const;
* static ValueItem create(std::size_t expected_size, std::size_t id);
* };
* std::ostream& operator<<(std::ostream& os, const ValueItem& item);
*
* class ValueImpl final : public Value {
* ...
*
* using item_t = ValueItem;
* void initialize(Transaction& t, const item_t& item);
* void validate(const item_t& item);
* };
*
*/
template <typename CursorType>
void initialize_cursor_from_item(
Transaction& t,
const ghobject_t& key,
const typename decltype(std::declval<CursorType>().value())::item_t& item,
CursorType& cursor,
bool insert_success) {
ceph_assert(insert_success);
ceph_assert(!cursor.is_end());
ceph_assert(cursor.get_ghobj() == key);
auto tree_value = cursor.value();
tree_value.initialize(t, item);
}
template <typename CursorType>
void validate_cursor_from_item(
const ghobject_t& key,
const typename decltype(std::declval<CursorType>().value())::item_t& item,
CursorType& cursor) {
ceph_assert(!cursor.is_end());
ceph_assert(cursor.get_ghobj() == key);
auto tree_value = cursor.value();
tree_value.validate(item);
}
template <typename ValueItem>
class Values {
public:
Values(size_t n) {
for (size_t i = 1; i <= n; ++i) {
auto item = create(i * 8);
values.push_back(item);
}
}
Values(std::vector<size_t> sizes) {
for (auto& size : sizes) {
auto item = create(size);
values.push_back(item);
}
}
~Values() = default;
ValueItem create(size_t size) {
return ValueItem::create(size, id++);
}
ValueItem pick() const {
auto index = rd() % values.size();
return values[index];
}
private:
std::size_t id = 0;
mutable std::random_device rd;
std::vector<ValueItem> values;
};
template <typename ValueItem>
class KVPool {
public:
struct kv_t {
ghobject_t key;
ValueItem value;
};
using kv_vector_t = std::vector<kv_t>;
using kvptr_vector_t = std::vector<kv_t*>;
using iterator_t = typename kvptr_vector_t::iterator;
size_t size() const {
return kvs.size();
}
iterator_t begin() {
return serial_p_kvs.begin();
}
iterator_t end() {
return serial_p_kvs.end();
}
iterator_t random_begin() {
return random_p_kvs.begin();
}
iterator_t random_end() {
return random_p_kvs.end();
}
void shuffle() {
std::shuffle(random_p_kvs.begin(), random_p_kvs.end(), std::default_random_engine{});
}
void erase_from_random(iterator_t begin, iterator_t end) {
random_p_kvs.erase(begin, end);
kv_vector_t new_kvs;
for (auto p_kv : random_p_kvs) {
new_kvs.emplace_back(*p_kv);
}
std::sort(new_kvs.begin(), new_kvs.end(), [](auto& l, auto& r) {
return l.key < r.key;
});
kvs.swap(new_kvs);
serial_p_kvs.resize(kvs.size());
random_p_kvs.resize(kvs.size());
init();
}
static KVPool create_raw_range(
const std::vector<size_t>& ns_sizes,
const std::vector<size_t>& oid_sizes,
const std::vector<size_t>& value_sizes,
const std::pair<index_t, index_t>& range2,
const std::pair<index_t, index_t>& range1,
const std::pair<index_t, index_t>& range0) {
ceph_assert(range2.first < range2.second);
ceph_assert(range2.second - 1 <= MAX_SHARD);
ceph_assert(range2.second - 1 <= MAX_CRUSH);
ceph_assert(range1.first < range1.second);
ceph_assert(range1.second - 1 <= 9);
ceph_assert(range0.first < range0.second);
kv_vector_t kvs;
std::random_device rd;
Values<ValueItem> values{value_sizes};
for (index_t i = range2.first; i < range2.second; ++i) {
for (index_t j = range1.first; j < range1.second; ++j) {
size_t ns_size;
size_t oid_size;
if (j == 0) {
// store ns0, oid0 as empty strings for test purposes
ns_size = 0;
oid_size = 0;
} else {
ns_size = ns_sizes[rd() % ns_sizes.size()];
oid_size = oid_sizes[rd() % oid_sizes.size()];
assert(ns_size && oid_size);
}
for (index_t k = range0.first; k < range0.second; ++k) {
kvs.emplace_back(
kv_t{make_raw_oid(i, j, k, ns_size, oid_size), values.pick()}
);
}
}
}
return KVPool(std::move(kvs));
}
static KVPool create_range(
const std::pair<index_t, index_t>& range_i,
const std::vector<size_t>& value_sizes,
const uint64_t block_size) {
kv_vector_t kvs;
std::random_device rd;
for (index_t i = range_i.first; i < range_i.second; ++i) {
auto value_size = value_sizes[rd() % value_sizes.size()];
kvs.emplace_back(
kv_t{make_oid(i), ValueItem::create(value_size, i, block_size)}
);
}
return KVPool(std::move(kvs));
}
private:
KVPool(kv_vector_t&& _kvs)
: kvs(std::move(_kvs)), serial_p_kvs(kvs.size()), random_p_kvs(kvs.size()) {
init();
}
void init() {
std::transform(kvs.begin(), kvs.end(), serial_p_kvs.begin(),
[] (kv_t& item) { return &item; });
std::transform(kvs.begin(), kvs.end(), random_p_kvs.begin(),
[] (kv_t& item) { return &item; });
shuffle();
}
static ghobject_t make_raw_oid(
index_t index2, index_t index1, index_t index0,
size_t ns_size, size_t oid_size) {
assert(index1 < 10);
std::ostringstream os_ns;
std::ostringstream os_oid;
if (index1 == 0) {
assert(!ns_size);
assert(!oid_size);
} else {
os_ns << "ns" << index1;
auto current_size = (size_t)os_ns.tellp();
assert(ns_size >= current_size);
os_ns << std::string(ns_size - current_size, '_');
os_oid << "oid" << index1;
current_size = (size_t)os_oid.tellp();
assert(oid_size >= current_size);
os_oid << std::string(oid_size - current_size, '_');
}
return ghobject_t(shard_id_t(index2), index2, index2,
os_ns.str(), os_oid.str(), index0, index0);
}
static ghobject_t make_oid(index_t i) {
std::stringstream ss;
ss << "object_" << i;
auto ret = ghobject_t(
hobject_t(
sobject_t(ss.str(), CEPH_NOSNAP)));
ret.set_shard(shard_id_t(0));
ret.hobj.nspace = "asdf";
return ret;
}
kv_vector_t kvs;
kvptr_vector_t serial_p_kvs;
kvptr_vector_t random_p_kvs;
};
template <bool TRACK, typename ValueImpl>
class TreeBuilder {
public:
using BtreeImpl = Btree<ValueImpl>;
using BtreeCursor = typename BtreeImpl::Cursor;
using ValueItem = typename ValueImpl::item_t;
using iterator_t = typename KVPool<ValueItem>::iterator_t;
TreeBuilder(KVPool<ValueItem>& kvs, NodeExtentManagerURef&& nm)
: kvs{kvs} {
tree.emplace(std::move(nm));
}
eagain_ifuture<> bootstrap(Transaction& t) {
std::ostringstream oss;
#ifndef NDEBUG
oss << "debug=on, ";
#else
oss << "debug=off, ";
#endif
#ifdef UNIT_TESTS_BUILT
oss << "UNIT_TEST_BUILT=on, ";
#else
oss << "UNIT_TEST_BUILT=off, ";
#endif
if constexpr (TRACK) {
oss << "track=on, ";
} else {
oss << "track=off, ";
}
oss << *tree;
logger().warn("TreeBuilder: {}, bootstrapping ...", oss.str());
return tree->mkfs(t);
}
eagain_ifuture<BtreeCursor> insert_one(
Transaction& t, const iterator_t& iter_rd) {
auto p_kv = *iter_rd;
logger().debug("[{}] insert {} -> {}",
iter_rd - kvs.random_begin(),
key_hobj_t{p_kv->key},
p_kv->value);
return tree->insert(
t, p_kv->key, {p_kv->value.get_payload_size()}
).si_then([&t, this, p_kv](auto ret) {
boost::ignore_unused(this); // avoid clang warning;
auto success = ret.second;
auto cursor = std::move(ret.first);
initialize_cursor_from_item(t, p_kv->key, p_kv->value, cursor, success);
#ifndef NDEBUG
validate_cursor_from_item(p_kv->key, p_kv->value, cursor);
return tree->find(t, p_kv->key
).si_then([cursor, p_kv](auto cursor_) mutable {
assert(!cursor_.is_end());
ceph_assert(cursor_.get_ghobj() == p_kv->key);
ceph_assert(cursor_.value() == cursor.value());
validate_cursor_from_item(p_kv->key, p_kv->value, cursor_);
return cursor;
});
#else
return eagain_iertr::make_ready_future<BtreeCursor>(cursor);
#endif
}).handle_error_interruptible(
[] (const crimson::ct_error::value_too_large& e) {
ceph_abort("impossible path");
},
crimson::ct_error::pass_further_all{}
);
}
eagain_ifuture<> insert(Transaction& t) {
auto ref_kv_iter = seastar::make_lw_shared<iterator_t>();
*ref_kv_iter = kvs.random_begin();
auto cursors = seastar::make_lw_shared<std::vector<BtreeCursor>>();
logger().warn("start inserting {} kvs ...", kvs.size());
auto start_time = mono_clock::now();
return trans_intr::repeat([&t, this, cursors, ref_kv_iter,
start_time]()
-> eagain_ifuture<seastar::stop_iteration> {
if (*ref_kv_iter == kvs.random_end()) {
std::chrono::duration<double> duration = mono_clock::now() - start_time;
logger().warn("Insert done! {}s", duration.count());
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
} else {
return insert_one(t, *ref_kv_iter
).si_then([cursors, ref_kv_iter] (auto cursor) {
if constexpr (TRACK) {
cursors->emplace_back(cursor);
}
++(*ref_kv_iter);
return seastar::stop_iteration::no;
});
}
}).si_then([&t, this, cursors, ref_kv_iter] {
if (!cursors->empty()) {
logger().info("Verifing tracked cursors ...");
*ref_kv_iter = kvs.random_begin();
return seastar::do_with(
cursors->begin(),
[&t, this, cursors, ref_kv_iter] (auto& c_iter) {
return trans_intr::repeat(
[&t, this, &c_iter, cursors, ref_kv_iter] ()
-> eagain_ifuture<seastar::stop_iteration> {
if (*ref_kv_iter == kvs.random_end()) {
logger().info("Verify done!");
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
}
assert(c_iter != cursors->end());
auto p_kv = **ref_kv_iter;
// validate values in tree keep intact
return tree->find(t, p_kv->key).si_then([&c_iter, ref_kv_iter](auto cursor) {
auto p_kv = **ref_kv_iter;
validate_cursor_from_item(p_kv->key, p_kv->value, cursor);
// validate values in cursors keep intact
validate_cursor_from_item(p_kv->key, p_kv->value, *c_iter);
++(*ref_kv_iter);
++c_iter;
return seastar::stop_iteration::no;
});
});
});
} else {
return eagain_iertr::now();
}
});
}
eagain_ifuture<> erase_one(
Transaction& t, const iterator_t& iter_rd) {
auto p_kv = *iter_rd;
logger().debug("[{}] erase {} -> {}",
iter_rd - kvs.random_begin(),
key_hobj_t{p_kv->key},
p_kv->value);
return tree->erase(t, p_kv->key
).si_then([&t, this, p_kv] (auto size) {
boost::ignore_unused(t); // avoid clang warning;
boost::ignore_unused(this);
boost::ignore_unused(p_kv);
ceph_assert(size == 1);
#ifndef NDEBUG
return tree->contains(t, p_kv->key
).si_then([] (bool ret) {
ceph_assert(ret == false);
});
#else
return eagain_iertr::now();
#endif
});
}
eagain_ifuture<> erase(Transaction& t, std::size_t erase_size) {
assert(erase_size <= kvs.size());
kvs.shuffle();
auto erase_end = kvs.random_begin() + erase_size;
auto ref_kv_iter = seastar::make_lw_shared<iterator_t>();
auto cursors = seastar::make_lw_shared<std::map<ghobject_t, BtreeCursor>>();
return eagain_iertr::now().si_then([&t, this, cursors, ref_kv_iter] {
(void)this; // silence clang warning for !TRACK
(void)t; // silence clang warning for !TRACK
if constexpr (TRACK) {
logger().info("Tracking cursors before erase ...");
*ref_kv_iter = kvs.begin();
auto start_time = mono_clock::now();
return trans_intr::repeat(
[&t, this, cursors, ref_kv_iter, start_time] ()
-> eagain_ifuture<seastar::stop_iteration> {
if (*ref_kv_iter == kvs.end()) {
std::chrono::duration<double> duration = mono_clock::now() - start_time;
logger().info("Track done! {}s", duration.count());
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
}
auto p_kv = **ref_kv_iter;
return tree->find(t, p_kv->key).si_then([cursors, ref_kv_iter](auto cursor) {
auto p_kv = **ref_kv_iter;
validate_cursor_from_item(p_kv->key, p_kv->value, cursor);
cursors->emplace(p_kv->key, cursor);
++(*ref_kv_iter);
return seastar::stop_iteration::no;
});
});
} else {
return eagain_iertr::now();
}
}).si_then([&t, this, ref_kv_iter, erase_end] {
*ref_kv_iter = kvs.random_begin();
logger().warn("start erasing {}/{} kvs ...",
erase_end - kvs.random_begin(), kvs.size());
auto start_time = mono_clock::now();
return trans_intr::repeat([&t, this, ref_kv_iter,
start_time, erase_end] ()
-> eagain_ifuture<seastar::stop_iteration> {
if (*ref_kv_iter == erase_end) {
std::chrono::duration<double> duration = mono_clock::now() - start_time;
logger().warn("Erase done! {}s", duration.count());
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
} else {
return erase_one(t, *ref_kv_iter
).si_then([ref_kv_iter] {
++(*ref_kv_iter);
return seastar::stop_iteration::no;
});
}
});
}).si_then([this, cursors, ref_kv_iter, erase_end] {
if constexpr (TRACK) {
logger().info("Verifing tracked cursors ...");
*ref_kv_iter = kvs.random_begin();
while (*ref_kv_iter != erase_end) {
auto p_kv = **ref_kv_iter;
auto c_it = cursors->find(p_kv->key);
ceph_assert(c_it != cursors->end());
ceph_assert(c_it->second.is_end());
cursors->erase(c_it);
++(*ref_kv_iter);
}
}
kvs.erase_from_random(kvs.random_begin(), erase_end);
if constexpr (TRACK) {
*ref_kv_iter = kvs.begin();
for (auto& [k, c] : *cursors) {
assert(*ref_kv_iter != kvs.end());
auto p_kv = **ref_kv_iter;
validate_cursor_from_item(p_kv->key, p_kv->value, c);
++(*ref_kv_iter);
}
logger().info("Verify done!");
}
});
}
eagain_ifuture<> get_stats(Transaction& t) {
return tree->get_stats_slow(t
).si_then([](auto stats) {
logger().warn("{}", stats);
});
}
eagain_ifuture<std::size_t> height(Transaction& t) {
return tree->height(t);
}
void reload(NodeExtentManagerURef&& nm) {
tree.emplace(std::move(nm));
}
eagain_ifuture<> validate_one(
Transaction& t, const iterator_t& iter_seq) {
assert(iter_seq != kvs.end());
auto next_iter = iter_seq + 1;
auto p_kv = *iter_seq;
return tree->find(t, p_kv->key
).si_then([p_kv, &t] (auto cursor) {
validate_cursor_from_item(p_kv->key, p_kv->value, cursor);
return cursor.get_next(t);
}).si_then([next_iter, this] (auto cursor) {
if (next_iter == kvs.end()) {
ceph_assert(cursor.is_end());
} else {
auto p_kv = *next_iter;
validate_cursor_from_item(p_kv->key, p_kv->value, cursor);
}
});
}
eagain_ifuture<> validate(Transaction& t) {
logger().info("Verifing inserted ...");
return seastar::do_with(
kvs.begin(),
[this, &t] (auto &iter) {
return trans_intr::repeat(
[this, &t, &iter]() ->eagain_iertr::future<seastar::stop_iteration> {
if (iter == kvs.end()) {
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
}
return validate_one(t, iter).si_then([&iter] {
++iter;
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::no);
});
});
});
}
private:
static seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_test);
}
KVPool<ValueItem>& kvs;
std::optional<BtreeImpl> tree;
};
}
| 17,436 | 29.80742 | 89 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/value.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <ostream>
#include "include/buffer.h"
#include "crimson/common/type_helpers.h"
#include "fwd.h"
#include "node_extent_mutable.h"
namespace crimson::os::seastore::onode {
// value size up to 64 KiB
using value_size_t = uint16_t;
enum class value_magic_t : uint8_t {
ONODE = 0x52,
TEST_UNBOUND,
TEST_BOUNDED,
TEST_EXTENDED,
};
inline std::ostream& operator<<(std::ostream& os, const value_magic_t& magic) {
switch (magic) {
case value_magic_t::ONODE:
return os << "ONODE";
case value_magic_t::TEST_UNBOUND:
return os << "TEST_UNBOUND";
case value_magic_t::TEST_BOUNDED:
return os << "TEST_BOUNDED";
case value_magic_t::TEST_EXTENDED:
return os << "TEST_EXTENDED";
default:
return os << "UNKNOWN(" << magic << ")";
}
}
/**
* value_config_t
*
* Parameters to create a value.
*/
struct value_config_t {
value_magic_t magic;
value_size_t payload_size;
value_size_t allocation_size() const;
void encode(ceph::bufferlist& encoded) const {
ceph::encode(magic, encoded);
ceph::encode(payload_size, encoded);
}
static value_config_t decode(ceph::bufferlist::const_iterator& delta) {
value_magic_t magic;
ceph::decode(magic, delta);
value_size_t payload_size;
ceph::decode(payload_size, delta);
return {magic, payload_size};
}
};
inline std::ostream& operator<<(std::ostream& os, const value_config_t& conf) {
return os << "ValueConf(" << conf.magic
<< ", " << conf.payload_size << "B)";
}
/**
* value_header_t
*
* The header structure in value layout.
*
* Value layout:
*
* # <- alloc size -> #
* # header | payload #
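 *
 * e.g. (illustration): a value created with payload_size == 256 occupies
 * allocation_size() == 256 + sizeof(value_header_t) bytes in the leaf node,
 * with the payload immediately following the packed header.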
*/
struct value_header_t {
value_magic_t magic;
value_size_t payload_size;
bool operator==(const value_header_t& rhs) const {
return (magic == rhs.magic && payload_size == rhs.payload_size);
}
bool operator!=(const value_header_t& rhs) const {
return !(*this == rhs);
}
value_size_t allocation_size() const {
return payload_size + sizeof(value_header_t);
}
const char* get_payload() const {
return reinterpret_cast<const char*>(this) + sizeof(value_header_t);
}
NodeExtentMutable get_payload_mutable(NodeExtentMutable& node) const {
return node.get_mutable_absolute(get_payload(), payload_size);
}
char* get_payload() {
return reinterpret_cast<char*>(this) + sizeof(value_header_t);
}
void initiate(NodeExtentMutable& mut, const value_config_t& config) {
value_header_t header{config.magic, config.payload_size};
mut.copy_in_absolute(this, header);
mut.set_absolute(get_payload(), 0, config.payload_size);
}
static value_size_t estimate_allocation_size(value_size_t payload_size) {
return payload_size + sizeof(value_header_t);
}
} __attribute__((packed));
inline std::ostream& operator<<(std::ostream& os, const value_header_t& header) {
return os << "Value(" << header.magic
<< ", " << header.payload_size << "B)";
}
inline value_size_t value_config_t::allocation_size() const {
return value_header_t::estimate_allocation_size(payload_size);
}
/**
* ValueDeltaRecorder
*
* An abstracted class to handle user-defined value delta encode, decode and
* replay.
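 *
 * Sketch of a user-defined recorder (illustrative only, names hypothetical):
 *
 *   class MyRecorder final : public ValueDeltaRecorder {
 *    public:
 *     MyRecorder(ceph::bufferlist& encoded) : ValueDeltaRecorder(encoded) {}
 *     value_magic_t get_header_magic() const override {
 *       return value_magic_t::TEST_UNBOUND;
 *     }
 *     void apply_value_delta(ceph::bufferlist::const_iterator& delta,
 *                            NodeExtentMutable& payload_mut,
 *                            laddr_t) override {
 *       // decode the delta previously appended via get_encoded() and
 *       // re-apply it to payload_mut
 *     }
 *   };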
*/
class ValueDeltaRecorder {
public:
virtual ~ValueDeltaRecorder() = default;
ValueDeltaRecorder(const ValueDeltaRecorder&) = delete;
ValueDeltaRecorder(ValueDeltaRecorder&&) = delete;
ValueDeltaRecorder& operator=(const ValueDeltaRecorder&) = delete;
ValueDeltaRecorder& operator=(ValueDeltaRecorder&&) = delete;
/// Returns the value header magic for validation purpose.
virtual value_magic_t get_header_magic() const = 0;
/// Called by DeltaRecorderT to apply user-defined value delta.
virtual void apply_value_delta(ceph::bufferlist::const_iterator&,
NodeExtentMutable&,
laddr_t) = 0;
protected:
ValueDeltaRecorder(ceph::bufferlist& encoded) : encoded{encoded} {}
/// Get the delta buffer to encode user-defined value delta.
ceph::bufferlist& get_encoded(NodeExtentMutable&);
private:
ceph::bufferlist& encoded;
};
/**
* tree_conf_t
*
* Hard limits and compile-time configurations.
*/
struct tree_conf_t {
value_magic_t value_magic;
string_size_t max_ns_size;
string_size_t max_oid_size;
value_size_t max_value_payload_size;
extent_len_t internal_node_size;
extent_len_t leaf_node_size;
bool do_split_check = true;
};
class tree_cursor_t;
/**
* Value
*
 * Value is a stateless view of the underlying value header and payload content
 * stored in a tree leaf node, with support for implementing user-defined value
 * deltas and for extending and trimming the underlying payload data (not
 * implemented yet).
 *
 * In the current implementation, we don't guarantee any alignment for the
 * value payload, due to the unaligned node layout and the corresponding merge
 * and split operations.
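 *
 * Sketch of a concrete value type (illustrative only; payload_t and
 * MyRecorder are assumptions made up for the example):
 *
 *   struct payload_t { uint32_t flags; } __attribute__((packed));
 *   class MyValue final : public Value {
 *    public:
 *     using Value::Value;
 *     uint32_t get_flags() const {
 *       return read_payload<payload_t>()->flags;
 *     }
 *     void set_flags(Transaction& t, uint32_t flags) {
 *       auto [mut, recorder] =
 *         prepare_mutate_payload<payload_t, MyRecorder>(t);
 *       // mutate the payload through mut and encode a delta via recorder
 *     }
 *   };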
*/
class Value {
public:
virtual ~Value();
Value(const Value&) = default;
Value(Value&&) = default;
Value& operator=(const Value&) = delete;
Value& operator=(Value&&) = delete;
/// Returns whether the Value is still tracked in tree.
bool is_tracked() const;
/// Invalidate the Value before submitting transaction.
void invalidate();
/// Returns the value payload size.
value_size_t get_payload_size() const {
assert(is_tracked());
return read_value_header()->payload_size;
}
laddr_t get_hint() const;
bool operator==(const Value& v) const { return p_cursor == v.p_cursor; }
bool operator!=(const Value& v) const { return !(*this == v); }
protected:
Value(NodeExtentManager&, const ValueBuilder&, Ref<tree_cursor_t>&);
/// Extends the payload size.
eagain_ifuture<> extend(Transaction&, value_size_t extend_size);
/// Trim and shrink the payload.
eagain_ifuture<> trim(Transaction&, value_size_t trim_size);
/// Get the permission to mutate the payload with the optional value recorder.
template <typename PayloadT, typename ValueDeltaRecorderT>
std::pair<NodeExtentMutable&, ValueDeltaRecorderT*>
prepare_mutate_payload(Transaction& t) {
assert(is_tracked());
assert(sizeof(PayloadT) <= get_payload_size());
auto value_mutable = do_prepare_mutate_payload(t);
assert(value_mutable.first.get_write() ==
const_cast<const Value*>(this)->template read_payload<char>());
assert(value_mutable.first.get_length() == get_payload_size());
return {value_mutable.first,
static_cast<ValueDeltaRecorderT*>(value_mutable.second)};
}
/// Get the latest payload pointer for read.
template <typename PayloadT>
const PayloadT* read_payload() const {
assert(is_tracked());
// see Value documentation
static_assert(alignof(PayloadT) == 1);
assert(sizeof(PayloadT) <= get_payload_size());
return reinterpret_cast<const PayloadT*>(read_value_header()->get_payload());
}
private:
const value_header_t* read_value_header() const;
context_t get_context(Transaction& t) {
return {nm, vb, t};
}
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
do_prepare_mutate_payload(Transaction&);
NodeExtentManager& nm;
const ValueBuilder& vb;
Ref<tree_cursor_t> p_cursor;
template <typename ValueImpl>
friend class Btree;
};
/**
* ValueBuilder
*
 * Allows tree nodes to build values without depending on the actual Value
 * implementation.
*/
struct ValueBuilder {
virtual value_magic_t get_header_magic() const = 0;
virtual string_size_t get_max_ns_size() const = 0;
virtual string_size_t get_max_oid_size() const = 0;
virtual value_size_t get_max_value_payload_size() const = 0;
virtual extent_len_t get_internal_node_size() const = 0;
virtual extent_len_t get_leaf_node_size() const = 0;
virtual std::unique_ptr<ValueDeltaRecorder>
build_value_recorder(ceph::bufferlist&) const = 0;
};
/**
* ValueBuilderImpl
*
* The concrete ValueBuilder implementation in Btree.
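 *
 * e.g. (sketch): ValueImpl is expected to provide a static tree_conf_t
 * TREE_CONF and a nested Recorder type, so that
 *
 *   ValueBuilderImpl<ValueImpl> vb;
 *   auto recorder = vb.build_value_recorder(encoded);
 *
 * lets the tree create values and delta recorders without knowing the
 * concrete value implementation.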
*/
template <typename ValueImpl>
struct ValueBuilderImpl final : public ValueBuilder {
ValueBuilderImpl() {
validate_tree_config(ValueImpl::TREE_CONF);
}
  value_magic_t get_header_magic() const override {
return ValueImpl::TREE_CONF.value_magic;
}
string_size_t get_max_ns_size() const override {
return ValueImpl::TREE_CONF.max_ns_size;
}
string_size_t get_max_oid_size() const override {
return ValueImpl::TREE_CONF.max_oid_size;
}
value_size_t get_max_value_payload_size() const override {
return ValueImpl::TREE_CONF.max_value_payload_size;
}
extent_len_t get_internal_node_size() const override {
return ValueImpl::TREE_CONF.internal_node_size;
}
extent_len_t get_leaf_node_size() const override {
return ValueImpl::TREE_CONF.leaf_node_size;
}
std::unique_ptr<ValueDeltaRecorder>
build_value_recorder(ceph::bufferlist& encoded) const override {
std::unique_ptr<ValueDeltaRecorder> ret =
std::make_unique<typename ValueImpl::Recorder>(encoded);
assert(ret->get_header_magic() == get_header_magic());
return ret;
}
ValueImpl build_value(NodeExtentManager& nm,
const ValueBuilder& vb,
Ref<tree_cursor_t>& p_cursor) const {
assert(vb.get_header_magic() == get_header_magic());
return ValueImpl(nm, vb, p_cursor);
}
};
void validate_tree_config(const tree_conf_t& conf);
/**
* Get the value recorder by type (the magic value) when the ValueBuilder is
* unavailable.
*/
std::unique_ptr<ValueDeltaRecorder>
build_value_recorder_by_type(ceph::bufferlist& encoded, const value_magic_t& magic);
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::onode::value_config_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::onode::value_header_t> : fmt::ostream_formatter {};
#endif
| 9,925 | 28.366864 | 108 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager/dummy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <chrono>
#include <seastar/core/sleep.hh>
#include "include/buffer_raw.h"
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager.h"
/**
* dummy.h
*
* Dummy backend implementations for test purposes.
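 *
 * e.g. (sketch, wiring assumed): a test would typically build the tree on top
 * of this backend with something like
 *
 *   auto nm = NodeExtentManagerURef{new DummyNodeExtentManager<true>()};
 *   Btree<ValueImpl> tree{std::move(nm)};
 *
 * where <true> selects the synchronous variant and <false> adds a short sleep
 * before each operation to exercise the asynchronous paths.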
*/
namespace crimson::os::seastore::onode {
class DummySuper final: public Super {
public:
DummySuper(Transaction& t, RootNodeTracker& tracker, laddr_t* p_root_laddr)
: Super(t, tracker), p_root_laddr{p_root_laddr} {}
~DummySuper() override = default;
protected:
laddr_t get_root_laddr() const override { return *p_root_laddr; }
void write_root_laddr(context_t c, laddr_t addr) override {
LOG_PREFIX(OTree::Dummy);
SUBDEBUGT(seastore_onode, "update root {:#x} ...", c.t, addr);
*p_root_laddr = addr;
}
private:
laddr_t* p_root_laddr;
};
class DummyNodeExtent final: public NodeExtent {
public:
DummyNodeExtent(ceph::bufferptr &&ptr) : NodeExtent(std::move(ptr)) {
state = extent_state_t::INITIAL_WRITE_PENDING;
}
DummyNodeExtent(const DummyNodeExtent& other) = delete;
~DummyNodeExtent() override = default;
void retire() {
assert(state == extent_state_t::INITIAL_WRITE_PENDING);
state = extent_state_t::INVALID;
bufferptr empty_bptr;
get_bptr().swap(empty_bptr);
}
protected:
NodeExtentRef mutate(context_t, DeltaRecorderURef&&) override {
ceph_abort("impossible path"); }
DeltaRecorder* get_recorder() const override {
return nullptr; }
CachedExtentRef duplicate_for_write(Transaction&) override {
ceph_abort("impossible path"); }
extent_types_t get_type() const override {
return extent_types_t::TEST_BLOCK; }
ceph::bufferlist get_delta() override {
ceph_abort("impossible path"); }
void apply_delta(const ceph::bufferlist&) override {
ceph_abort("impossible path"); }
};
template <bool SYNC>
class DummyNodeExtentManager final: public NodeExtentManager {
static constexpr size_t ALIGNMENT = 4096;
public:
~DummyNodeExtentManager() override = default;
std::size_t size() const { return allocate_map.size(); }
protected:
bool is_read_isolated() const override { return false; }
read_iertr::future<NodeExtentRef> read_extent(
Transaction& t, laddr_t addr) override {
SUBTRACET(seastore_onode, "reading at {:#x} ...", t, addr);
if constexpr (SYNC) {
return read_extent_sync(t, addr);
} else {
using namespace std::chrono_literals;
return seastar::sleep(1us).then([this, &t, addr] {
return read_extent_sync(t, addr);
});
}
}
alloc_iertr::future<NodeExtentRef> alloc_extent(
Transaction& t, laddr_t hint, extent_len_t len) override {
SUBTRACET(seastore_onode, "allocating {}B with hint {:#x} ...", t, len, hint);
if constexpr (SYNC) {
return alloc_extent_sync(t, len);
} else {
using namespace std::chrono_literals;
return seastar::sleep(1us).then([this, &t, len] {
return alloc_extent_sync(t, len);
});
}
}
retire_iertr::future<> retire_extent(
Transaction& t, NodeExtentRef extent) override {
SUBTRACET(seastore_onode,
"retiring {}B at {:#x} -- {} ...",
t, extent->get_length(), extent->get_laddr(), *extent);
if constexpr (SYNC) {
return retire_extent_sync(t, extent);
} else {
using namespace std::chrono_literals;
return seastar::sleep(1us).then([this, &t, extent] {
return retire_extent_sync(t, extent);
});
}
}
getsuper_iertr::future<Super::URef> get_super(
Transaction& t, RootNodeTracker& tracker) override {
SUBTRACET(seastore_onode, "get root ...", t);
if constexpr (SYNC) {
return get_super_sync(t, tracker);
} else {
using namespace std::chrono_literals;
return seastar::sleep(1us).then([this, &t, &tracker] {
return get_super_sync(t, tracker);
});
}
}
std::ostream& print(std::ostream& os) const override {
return os << "DummyNodeExtentManager(sync=" << SYNC << ")";
}
private:
read_iertr::future<NodeExtentRef> read_extent_sync(
Transaction& t, laddr_t addr) {
auto iter = allocate_map.find(addr);
assert(iter != allocate_map.end());
auto extent = iter->second;
SUBTRACET(seastore_onode,
"read {}B at {:#x} -- {}",
t, extent->get_length(), extent->get_laddr(), *extent);
assert(extent->get_laddr() == addr);
return read_iertr::make_ready_future<NodeExtentRef>(extent);
}
alloc_iertr::future<NodeExtentRef> alloc_extent_sync(
Transaction& t, extent_len_t len) {
assert(len % ALIGNMENT == 0);
auto r = ceph::buffer::create_aligned(len, ALIGNMENT);
auto addr = reinterpret_cast<laddr_t>(r->get_data());
auto bp = ceph::bufferptr(std::move(r));
auto extent = Ref<DummyNodeExtent>(new DummyNodeExtent(std::move(bp)));
extent->set_laddr(addr);
assert(allocate_map.find(extent->get_laddr()) == allocate_map.end());
allocate_map.insert({extent->get_laddr(), extent});
SUBDEBUGT(seastore_onode,
"allocated {}B at {:#x} -- {}",
t, extent->get_length(), extent->get_laddr(), *extent);
assert(extent->get_length() == len);
return alloc_iertr::make_ready_future<NodeExtentRef>(extent);
}
retire_iertr::future<> retire_extent_sync(
Transaction& t, NodeExtentRef _extent) {
auto& extent = static_cast<DummyNodeExtent&>(*_extent.get());
auto addr = extent.get_laddr();
auto len = extent.get_length();
extent.retire();
auto iter = allocate_map.find(addr);
assert(iter != allocate_map.end());
allocate_map.erase(iter);
SUBDEBUGT(seastore_onode, "retired {}B at {:#x}", t, len, addr);
return retire_iertr::now();
}
getsuper_iertr::future<Super::URef> get_super_sync(
Transaction& t, RootNodeTracker& tracker) {
SUBTRACET(seastore_onode, "got root {:#x}", t, root_laddr);
return getsuper_iertr::make_ready_future<Super::URef>(
Super::URef(new DummySuper(t, tracker, &root_laddr)));
}
static LOG_PREFIX(OTree::Dummy);
std::map<laddr_t, Ref<DummyNodeExtent>> allocate_map;
laddr_t root_laddr = L_ADDR_NULL;
};
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::onode::DummyNodeExtent> : fmt::ostream_formatter {};
#endif
| 6,425 | 31.619289 | 109 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager/seastore.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <random>
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_delta_recorder.h"
/**
* seastore.h
*
* Seastore backend implementations.
*/
namespace crimson::os::seastore::onode {
class SeastoreSuper final: public Super {
public:
SeastoreSuper(Transaction& t, RootNodeTracker& tracker,
laddr_t root_addr, TransactionManager& tm)
: Super(t, tracker), root_addr{root_addr}, tm{tm} {}
~SeastoreSuper() override = default;
protected:
laddr_t get_root_laddr() const override {
return root_addr;
}
void write_root_laddr(context_t c, laddr_t addr) override {
LOG_PREFIX(OTree::Seastore);
SUBDEBUGT(seastore_onode, "update root {:#x} ...", c.t, addr);
root_addr = addr;
tm.write_onode_root(c.t, addr);
}
private:
laddr_t root_addr;
TransactionManager &tm;
};
class SeastoreNodeExtent final: public NodeExtent {
public:
SeastoreNodeExtent(ceph::bufferptr &&ptr)
: NodeExtent(std::move(ptr)) {}
SeastoreNodeExtent(const SeastoreNodeExtent& other)
: NodeExtent(other) {}
~SeastoreNodeExtent() override = default;
constexpr static extent_types_t TYPE = extent_types_t::ONODE_BLOCK_STAGED;
extent_types_t get_type() const override {
return TYPE;
}
protected:
NodeExtentRef mutate(context_t, DeltaRecorderURef&&) override;
DeltaRecorder* get_recorder() const override {
return recorder.get();
}
CachedExtentRef duplicate_for_write(Transaction&) override {
return CachedExtentRef(new SeastoreNodeExtent(*this));
}
ceph::bufferlist get_delta() override {
assert(recorder);
return recorder->get_delta();
}
void apply_delta(const ceph::bufferlist&) override;
private:
DeltaRecorderURef recorder;
};
class TransactionManagerHandle : public NodeExtentManager {
public:
TransactionManagerHandle(TransactionManager &tm) : tm{tm} {}
TransactionManager &tm;
};
template <bool INJECT_EAGAIN=false>
class SeastoreNodeExtentManager final: public TransactionManagerHandle {
public:
SeastoreNodeExtentManager(
TransactionManager &tm, laddr_t min, double p_eagain)
: TransactionManagerHandle(tm), addr_min{min}, p_eagain{p_eagain} {
if constexpr (INJECT_EAGAIN) {
assert(p_eagain > 0.0 && p_eagain < 1.0);
} else {
assert(p_eagain == 0.0);
}
}
~SeastoreNodeExtentManager() override = default;
void set_generate_eagain(bool enable) {
generate_eagain = enable;
}
protected:
bool is_read_isolated() const override { return true; }
read_iertr::future<NodeExtentRef> read_extent(
Transaction& t, laddr_t addr) override {
SUBTRACET(seastore_onode, "reading at {:#x} ...", t, addr);
if constexpr (INJECT_EAGAIN) {
if (trigger_eagain()) {
SUBDEBUGT(seastore_onode, "reading at {:#x}: trigger eagain", t, addr);
t.test_set_conflict();
return read_iertr::make_ready_future<NodeExtentRef>();
}
}
return tm.read_extent<SeastoreNodeExtent>(t, addr
).si_then([addr, &t](auto&& e) -> read_iertr::future<NodeExtentRef> {
SUBTRACET(seastore_onode,
"read {}B at {:#x} -- {}",
t, e->get_length(), e->get_laddr(), *e);
assert(e->get_laddr() == addr);
std::ignore = addr;
return read_iertr::make_ready_future<NodeExtentRef>(e);
});
}
alloc_iertr::future<NodeExtentRef> alloc_extent(
Transaction& t, laddr_t hint, extent_len_t len) override {
SUBTRACET(seastore_onode, "allocating {}B with hint {:#x} ...", t, len, hint);
if constexpr (INJECT_EAGAIN) {
if (trigger_eagain()) {
SUBDEBUGT(seastore_onode, "allocating {}B: trigger eagain", t, len);
t.test_set_conflict();
return alloc_iertr::make_ready_future<NodeExtentRef>();
}
}
return tm.alloc_extent<SeastoreNodeExtent>(t, hint, len
).si_then([len, &t](auto extent) {
SUBDEBUGT(seastore_onode,
"allocated {}B at {:#x} -- {}",
t, extent->get_length(), extent->get_laddr(), *extent);
if (!extent->is_initial_pending()) {
SUBERRORT(seastore_onode,
"allocated {}B but got invalid extent: {}",
t, len, *extent);
ceph_abort("fatal error");
}
assert(extent->get_length() == len);
std::ignore = len;
return NodeExtentRef(extent);
});
}
retire_iertr::future<> retire_extent(
Transaction& t, NodeExtentRef _extent) override {
LogicalCachedExtentRef extent = _extent;
auto addr = extent->get_laddr();
auto len = extent->get_length();
SUBDEBUGT(seastore_onode,
"retiring {}B at {:#x} -- {} ...",
t, len, addr, *extent);
if constexpr (INJECT_EAGAIN) {
if (trigger_eagain()) {
SUBDEBUGT(seastore_onode,
"retiring {}B at {:#x} -- {} : trigger eagain",
t, len, addr, *extent);
t.test_set_conflict();
return retire_iertr::now();
}
}
return tm.dec_ref(t, extent).si_then([addr, len, &t] (unsigned cnt) {
assert(cnt == 0);
SUBTRACET(seastore_onode, "retired {}B at {:#x} ...", t, len, addr);
});
}
getsuper_iertr::future<Super::URef> get_super(
Transaction& t, RootNodeTracker& tracker) override {
SUBTRACET(seastore_onode, "get root ...", t);
if constexpr (INJECT_EAGAIN) {
if (trigger_eagain()) {
SUBDEBUGT(seastore_onode, "get root: trigger eagain", t);
t.test_set_conflict();
return getsuper_iertr::make_ready_future<Super::URef>();
}
}
return tm.read_onode_root(t).si_then([this, &t, &tracker](auto root_addr) {
SUBTRACET(seastore_onode, "got root {:#x}", t, root_addr);
return Super::URef(new SeastoreSuper(t, tracker, root_addr, tm));
});
}
std::ostream& print(std::ostream& os) const override {
os << "SeastoreNodeExtentManager";
if constexpr (INJECT_EAGAIN) {
os << "(p_eagain=" << p_eagain << ")";
}
return os;
}
private:
static LOG_PREFIX(OTree::Seastore);
const laddr_t addr_min;
// XXX: conditional members by INJECT_EAGAIN
bool trigger_eagain() {
if (generate_eagain) {
double dice = rd();
assert(rd.min() == 0);
dice /= rd.max();
return dice <= p_eagain;
} else {
return false;
}
}
bool generate_eagain = true;
std::random_device rd;
double p_eagain;
};
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::onode::SeastoreNodeExtent> : fmt::ostream_formatter {};
#endif
| 6,732 | 29.058036 | 112 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager/test_replay.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/onode_manager/staged-fltree/node_delta_recorder.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager.h"
/** test_replay.h
*
* A special version of NodeExtent to help verify delta encode, decode and
* replay in recorder_t under debug build.
*/
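/*
 * A minimal usage sketch (illustrative only; it shows the intended order of
 * the calls declared below and assumes the recorder handed to create() ends
 * up holding the deltas produced by the real mutation):
 *
 *   auto dup = TestReplayExtent::create(length, std::move(recorder));
 *   dup->prepare_replay(extent);              // snapshot the pre-mutation image
 *   // ... the real extent is mutated elsewhere and deltas get recorded ...
 *   dup->replay_and_verify(mutated_extent);   // apply deltas, then memcmp
 */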
namespace crimson::os::seastore::onode {
class TestReplayExtent final: public NodeExtent {
public:
using Ref = crimson::os::seastore::TCachedExtentRef<TestReplayExtent>;
void prepare_replay(NodeExtentRef from_extent) {
assert(get_length() == from_extent->get_length());
auto mut = do_get_mutable();
std::memcpy(mut.get_write(), from_extent->get_read(), get_length());
}
void replay_and_verify(NodeExtentRef replayed_extent) {
assert(get_length() == replayed_extent->get_length());
auto mut = do_get_mutable();
auto bl = recorder->get_delta();
assert(bl.length());
auto p = bl.cbegin();
recorder->apply_delta(p, mut, *this);
assert(p == bl.end());
auto cmp = std::memcmp(get_read(), replayed_extent->get_read(), get_length());
ceph_assert(cmp == 0 && "replay mismatch!");
}
static Ref create(extent_len_t length, DeltaRecorderURef&& recorder) {
auto r = ceph::buffer::create_aligned(length, 4096);
auto bp = ceph::bufferptr(std::move(r));
return new TestReplayExtent(std::move(bp), std::move(recorder));
}
protected:
NodeExtentRef mutate(context_t, DeltaRecorderURef&&) override {
ceph_abort("impossible path"); }
DeltaRecorder* get_recorder() const override {
ceph_abort("impossible path"); }
CachedExtentRef duplicate_for_write(Transaction&) override {
ceph_abort("impossible path"); }
extent_types_t get_type() const override {
return extent_types_t::TEST_BLOCK; }
ceph::bufferlist get_delta() override {
ceph_abort("impossible path"); }
void apply_delta(const ceph::bufferlist&) override {
ceph_abort("impossible path"); }
private:
TestReplayExtent(ceph::bufferptr&& ptr, DeltaRecorderURef&& recorder)
: NodeExtent(std::move(ptr)), recorder(std::move(recorder)) {
state = extent_state_t::MUTATION_PENDING;
}
DeltaRecorderURef recorder;
};
}
| 2,300 | 32.838235 | 82 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/stages/item_iterator_stage.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/onode_manager/staged-fltree/node_types.h"
#include "key_layout.h"
#include "stage_types.h"
namespace crimson::os::seastore::onode {
class NodeExtentMutable;
/**
* item_iterator_t
*
* The STAGE_STRING implementation for node N0/N1, implements staged contract
* as an iterative container to resolve crush hash conflicts.
*
 * The layout of the container to index ns, oid strings, storing n items:
*
* # <--------- container range ---------> #
* #<~># items [i+1, n) #
* # # items [0, i) #<~>#
* # # <------ item i -------------> # #
* # # <--- item_range ---> | # #
* # # | # #
* # # next-stage | ns-oid | back_ # #
 *   # #  container  | strings | offset # #
* #...# range | | #...#
* ^ ^ | ^
* | | | |
* | +---------------------------+ |
* + p_items_start p_items_end +
*/
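/*
 * Iteration sketch (illustrative only; assumes `range` is a valid
 * container_range_t over a populated STAGE_STRING container of a leaf node):
 *
 *   item_iterator_t<node_type_t::LEAF> iter(range);
 *   while (true) {
 *     const ns_oid_view_t& key = iter.get_key();   // ns-oid of item index()
 *     auto nxt = iter.get_nxt_container();         // next-stage (snap-gen) range
 *     // ... descend into the STAGE_RIGHT container via nxt ...
 *     if (!iter.has_next()) break;
 *     ++iter;   // physically walks right-to-left, index() increases
 *   }
 */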
template <node_type_t NODE_TYPE>
class item_iterator_t {
using value_input_t = value_input_type_t<NODE_TYPE>;
using value_t = value_type_t<NODE_TYPE>;
public:
item_iterator_t(const container_range_t& range)
: node_size{range.node_size},
p_items_start(range.range.p_start),
p_items_end(range.range.p_end) {
assert(is_valid_node_size(node_size));
assert(p_items_start < p_items_end);
next_item_range(p_items_end);
}
const char* p_start() const { return item_range.p_start; }
const char* p_end() const { return item_range.p_end + sizeof(node_offset_t); }
const memory_range_t& get_item_range() const { return item_range; }
node_offset_t get_back_offset() const { return back_offset; }
// container type system
using key_get_type = const ns_oid_view_t&;
static constexpr auto CONTAINER_TYPE = ContainerType::ITERATIVE;
index_t index() const { return _index; }
key_get_type get_key() const {
if (!key.has_value()) {
key = ns_oid_view_t(item_range.p_end);
assert(item_range.p_start < (*key).p_start());
}
return *key;
}
node_offset_t size() const {
size_t ret = item_range.p_end - item_range.p_start + sizeof(node_offset_t);
assert(ret < node_size);
return ret;
};
node_offset_t size_to_nxt() const {
size_t ret = get_key().size() + sizeof(node_offset_t);
assert(ret < node_size);
return ret;
}
node_offset_t size_overhead() const {
return sizeof(node_offset_t) + get_key().size_overhead();
}
container_range_t get_nxt_container() const {
return {{item_range.p_start, get_key().p_start()}, node_size};
}
bool has_next() const {
assert(p_items_start <= item_range.p_start);
return p_items_start < item_range.p_start;
}
const item_iterator_t<NODE_TYPE>& operator++() const {
assert(has_next());
next_item_range(item_range.p_start);
key.reset();
++_index;
return *this;
}
void encode(const char* p_node_start, ceph::bufferlist& encoded) const {
int start_offset = p_items_start - p_node_start;
int stage_size = p_items_end - p_items_start;
assert(start_offset > 0);
assert(stage_size > 0);
assert(start_offset + stage_size <= (int)node_size);
ceph::encode(static_cast<node_offset_t>(start_offset), encoded);
ceph::encode(static_cast<node_offset_t>(stage_size), encoded);
ceph::encode(_index, encoded);
}
static item_iterator_t decode(const char* p_node_start,
extent_len_t node_size,
ceph::bufferlist::const_iterator& delta) {
node_offset_t start_offset;
ceph::decode(start_offset, delta);
node_offset_t stage_size;
ceph::decode(stage_size, delta);
assert(start_offset > 0);
assert(stage_size > 0);
assert((unsigned)start_offset + stage_size <= node_size);
index_t index;
ceph::decode(index, delta);
item_iterator_t ret({{p_node_start + start_offset,
p_node_start + start_offset + stage_size},
node_size});
while (index > 0) {
++ret;
--index;
}
return ret;
}
static node_offset_t header_size() { return 0u; }
template <IsFullKey Key>
static node_offset_t estimate_insert(
const Key& key, const value_input_t&) {
return ns_oid_view_t::estimate_size(key) + sizeof(node_offset_t);
}
template <IsFullKey Key>
static memory_range_t insert_prefix(
NodeExtentMutable& mut, const item_iterator_t<NODE_TYPE>& iter,
const Key& key, bool is_end,
node_offset_t size, const char* p_left_bound);
static void update_size(
NodeExtentMutable& mut, const item_iterator_t<NODE_TYPE>& iter, int change);
static node_offset_t trim_until(NodeExtentMutable&, const item_iterator_t<NODE_TYPE>&);
static node_offset_t trim_at(
NodeExtentMutable&, const item_iterator_t<NODE_TYPE>&, node_offset_t trimmed);
static node_offset_t erase(
NodeExtentMutable&, const item_iterator_t<NODE_TYPE>&, const char*);
template <KeyT KT>
class Appender;
private:
void next_item_range(const char* p_end) const {
auto p_item_end = p_end - sizeof(node_offset_t);
assert(p_items_start < p_item_end);
back_offset = reinterpret_cast<const node_offset_packed_t*>(p_item_end)->value;
assert(back_offset);
const char* p_item_start = p_item_end - back_offset;
assert(p_items_start <= p_item_start);
item_range = {p_item_start, p_item_end};
}
extent_len_t node_size;
const char* p_items_start;
const char* p_items_end;
mutable memory_range_t item_range;
mutable node_offset_t back_offset;
mutable std::optional<ns_oid_view_t> key;
mutable index_t _index = 0u;
};
template <node_type_t NODE_TYPE>
template <KeyT KT>
class item_iterator_t<NODE_TYPE>::Appender {
public:
Appender(NodeExtentMutable* p_mut, char* p_append)
: p_mut{p_mut}, p_append{p_append} {}
Appender(NodeExtentMutable*, const item_iterator_t&, bool open);
bool append(const item_iterator_t<NODE_TYPE>& src, index_t& items);
char* wrap() { return p_append; }
std::tuple<NodeExtentMutable*, char*> open_nxt(const key_get_type&);
std::tuple<NodeExtentMutable*, char*> open_nxt(const full_key_t<KT>&);
void wrap_nxt(char* _p_append);
private:
NodeExtentMutable* p_mut;
char* p_append;
char* p_offset_while_open;
};
}
| 6,505 | 32.536082 | 89 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/stages/node_stage.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/onode_manager/staged-fltree/node_types.h"
#include "key_layout.h"
#include "stage_types.h"
namespace crimson::os::seastore::onode {
class NodeExtentMutable;
/**
* node_extent_t
*
* The top indexing stage implementation for node N0/N1/N2/N3, implements
* staged contract as an indexable container, and provides access to node
* header.
*
 * The specific field layout is defined by FieldType, which is one of
 * node_fields_0_t, node_fields_1_t, node_fields_2_t, internal_fields_3_t and
 * leaf_fields_3_t. See node_stage_layout.h for the layout diagrams.
*/
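/*
 * Instantiation sketch (illustrative; the leaf/N0 combination, p_fields and
 * node_size below are assumptions picked only to show the API surface):
 *
 *   node_extent_t<node_fields_0_t, node_type_t::LEAF> stage(p_fields, node_size);
 *   auto num = stage.keys();                 // number of indexed keys
 *   auto key = stage[0];                     // fixed-size key of slot 0
 *   auto nxt = stage.get_nxt_container(0);   // descend to the next stage
 */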
template <typename FieldType, node_type_t _NODE_TYPE>
class node_extent_t {
public:
using value_input_t = value_input_type_t<_NODE_TYPE>;
using value_t = value_type_t<_NODE_TYPE>;
using num_keys_t = typename FieldType::num_keys_t;
static constexpr node_type_t NODE_TYPE = _NODE_TYPE;
static constexpr field_type_t FIELD_TYPE = FieldType::FIELD_TYPE;
// TODO: remove
node_extent_t() = default;
node_extent_t(const FieldType* p_fields, extent_len_t node_size)
: p_fields{p_fields}, node_size{node_size} {
assert(is_valid_node_size(node_size));
validate(*p_fields);
}
const char* p_start() const { return fields_start(*p_fields); }
bool is_level_tail() const { return p_fields->is_level_tail(); }
level_t level() const { return p_fields->header.level; }
node_offset_t free_size() const {
return p_fields->template free_size_before<NODE_TYPE>(
keys(), node_size);
}
extent_len_t total_size() const {
return p_fields->total_size(node_size);
}
const char* p_left_bound() const;
template <node_type_t T = NODE_TYPE>
std::enable_if_t<T == node_type_t::INTERNAL, const laddr_packed_t*>
get_end_p_laddr() const {
assert(is_level_tail());
if constexpr (FIELD_TYPE == field_type_t::N3) {
return p_fields->get_p_child_addr(keys(), node_size);
} else {
auto offset_start = p_fields->get_item_end_offset(
keys(), node_size);
assert(offset_start <= node_size);
offset_start -= sizeof(laddr_packed_t);
auto p_addr = p_start() + offset_start;
return reinterpret_cast<const laddr_packed_t*>(p_addr);
}
}
// container type system
using key_get_type = typename FieldType::key_get_type;
static constexpr auto CONTAINER_TYPE = ContainerType::INDEXABLE;
index_t keys() const { return p_fields->num_keys; }
key_get_type operator[] (index_t index) const {
return p_fields->get_key(index, node_size);
}
extent_len_t size_before(index_t index) const {
auto free_size = p_fields->template free_size_before<NODE_TYPE>(
index, node_size);
assert(total_size() >= free_size);
return total_size() - free_size;
}
node_offset_t size_to_nxt_at(index_t index) const;
node_offset_t size_overhead_at(index_t index) const {
return FieldType::ITEM_OVERHEAD; }
container_range_t get_nxt_container(index_t index) const;
template <typename T = FieldType>
std::enable_if_t<T::FIELD_TYPE == field_type_t::N3, const value_t*>
get_p_value(index_t index) const {
assert(index < keys());
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
return p_fields->get_p_child_addr(index, node_size);
} else {
auto range = get_nxt_container(index).range;
auto ret = reinterpret_cast<const value_header_t*>(range.p_start);
assert(range.p_start + ret->allocation_size() == range.p_end);
return ret;
}
}
void encode(const char* p_node_start, ceph::bufferlist& encoded) const {
assert(p_node_start == p_start());
// nothing to encode as the container range is the entire extent
}
static node_extent_t decode(const char* p_node_start,
extent_len_t node_size,
ceph::bufferlist::const_iterator& delta) {
// nothing to decode
return node_extent_t(
reinterpret_cast<const FieldType*>(p_node_start),
node_size);
}
static void validate(const FieldType& fields) {
#ifndef NDEBUG
assert(fields.header.get_node_type() == NODE_TYPE);
assert(fields.header.get_field_type() == FieldType::FIELD_TYPE);
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
assert(fields.header.level > 0u);
} else {
assert(fields.header.level == 0u);
}
#endif
}
static void bootstrap_extent(
NodeExtentMutable&, field_type_t, node_type_t, bool, level_t);
static void update_is_level_tail(NodeExtentMutable&, const node_extent_t&, bool);
static node_offset_t header_size() { return FieldType::HEADER_SIZE; }
template <IsFullKey Key>
static node_offset_t estimate_insert(
const Key& key, const value_input_t& value) {
auto size = FieldType::estimate_insert_one();
if constexpr (FIELD_TYPE == field_type_t::N2) {
size += ns_oid_view_t::estimate_size(key);
} else if constexpr (FIELD_TYPE == field_type_t::N3 &&
NODE_TYPE == node_type_t::LEAF) {
size += value.allocation_size();
}
return size;
}
template <IsFullKey Key>
static const value_t* insert_at(
NodeExtentMutable& mut, const node_extent_t&,
const Key& key, const value_input_t& value,
index_t index, node_offset_t size, const char* p_left_bound) {
if constexpr (FIELD_TYPE == field_type_t::N3) {
ceph_abort("not implemented");
} else {
ceph_abort("impossible");
}
}
template <IsFullKey Key>
static memory_range_t insert_prefix_at(
NodeExtentMutable&, const node_extent_t&,
const Key& key,
index_t index, node_offset_t size, const char* p_left_bound);
static void update_size_at(
NodeExtentMutable&, const node_extent_t&, index_t index, int change);
static node_offset_t trim_until(
NodeExtentMutable&, const node_extent_t&, index_t index);
static node_offset_t trim_at(NodeExtentMutable&, const node_extent_t&,
index_t index, node_offset_t trimmed);
static node_offset_t erase_at(NodeExtentMutable&, const node_extent_t&,
index_t index, const char* p_left_bound);
template <KeyT KT>
class Appender;
private:
const FieldType& fields() const { return *p_fields; }
const FieldType* p_fields;
extent_len_t node_size;
};
template <typename FieldType, node_type_t NODE_TYPE>
template <KeyT KT>
class node_extent_t<FieldType, NODE_TYPE>::Appender {
public:
Appender(NodeExtentMutable* p_mut, char* p_append)
: p_mut{p_mut}, p_start{p_append} {
#ifndef NDEBUG
auto p_fields = reinterpret_cast<const FieldType*>(p_append);
assert(*(p_fields->header.get_field_type()) == FIELD_TYPE);
assert(p_fields->header.get_node_type() == NODE_TYPE);
assert(p_fields->num_keys == 0);
#endif
p_append_left = p_start + FieldType::HEADER_SIZE;
p_append_right = p_start + p_mut->get_length();
}
Appender(NodeExtentMutable*, const node_extent_t&, bool open = false);
void append(const node_extent_t& src, index_t from, index_t items);
void append(const full_key_t<KT>&, const value_input_t&, const value_t*&);
char* wrap();
std::tuple<NodeExtentMutable*, char*> open_nxt(const key_get_type&);
std::tuple<NodeExtentMutable*, char*> open_nxt(const full_key_t<KT>&);
void wrap_nxt(char* p_append) {
if constexpr (FIELD_TYPE != field_type_t::N3) {
assert(p_append < p_append_right);
assert(p_append_left < p_append);
p_append_right = p_append;
auto new_offset = p_append - p_start;
assert(new_offset > 0);
assert(new_offset < p_mut->get_length());
FieldType::append_offset(*p_mut, new_offset, p_append_left);
++num_keys;
} else {
ceph_abort("not implemented");
}
}
private:
const node_extent_t* p_src = nullptr;
NodeExtentMutable* p_mut;
char* p_start;
char* p_append_left;
char* p_append_right;
num_keys_t num_keys = 0;
};
}
| 8,009 | 33.377682 | 83 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/stages/node_stage_layout.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "key_layout.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_types.h"
namespace crimson::os::seastore::onode {
class NodeExtentMutable;
struct node_header_t {
static constexpr unsigned FIELD_TYPE_BITS = 6u;
static_assert(static_cast<uint8_t>(field_type_t::_MAX) <= 1u << FIELD_TYPE_BITS);
static constexpr unsigned NODE_TYPE_BITS = 1u;
static constexpr unsigned B_LEVEL_TAIL_BITS = 1u;
using bits_t = uint8_t;
node_header_t() {}
std::optional<field_type_t> get_field_type() const {
if (field_type >= FIELD_TYPE_MAGIC &&
field_type < static_cast<uint8_t>(field_type_t::_MAX)) {
return static_cast<field_type_t>(field_type);
} else {
return std::nullopt;
}
}
node_type_t get_node_type() const {
return static_cast<node_type_t>(node_type);
}
bool get_is_level_tail() const {
return is_level_tail;
}
static void bootstrap_extent(
NodeExtentMutable&, field_type_t, node_type_t, bool, level_t);
static void update_is_level_tail(NodeExtentMutable&, const node_header_t&, bool);
bits_t field_type : FIELD_TYPE_BITS;
bits_t node_type : NODE_TYPE_BITS;
bits_t is_level_tail : B_LEVEL_TAIL_BITS;
static_assert(sizeof(bits_t) * 8 ==
FIELD_TYPE_BITS + NODE_TYPE_BITS + B_LEVEL_TAIL_BITS);
level_t level;
private:
void set_field_type(field_type_t type) {
field_type = static_cast<uint8_t>(type);
}
void set_node_type(node_type_t type) {
node_type = static_cast<uint8_t>(type);
}
void set_is_level_tail(bool value) {
is_level_tail = static_cast<uint8_t>(value);
}
} __attribute__((packed));
inline std::ostream& operator<<(std::ostream& os, const node_header_t& header) {
auto field_type = header.get_field_type();
if (field_type.has_value()) {
os << "header" << header.get_node_type() << *field_type
<< "(is_level_tail=" << header.get_is_level_tail()
<< ", level=" << (unsigned)header.level << ")";
} else {
os << "header(INVALID)";
}
return os;
}
template <typename FixedKeyType, field_type_t _FIELD_TYPE>
struct _slot_t {
using key_t = FixedKeyType;
static constexpr field_type_t FIELD_TYPE = _FIELD_TYPE;
static constexpr node_offset_t OVERHEAD = sizeof(_slot_t) - sizeof(key_t);
key_t key;
node_offset_t right_offset;
} __attribute__((packed));
using slot_0_t = _slot_t<shard_pool_crush_t, field_type_t::N0>;
using slot_1_t = _slot_t<crush_t, field_type_t::N1>;
using slot_3_t = _slot_t<snap_gen_t, field_type_t::N3>;
struct node_range_t {
extent_len_t start;
extent_len_t end;
};
template <typename FieldType>
const char* fields_start(const FieldType& node) {
return reinterpret_cast<const char*>(&node);
}
template <node_type_t NODE_TYPE, typename FieldType>
node_range_t fields_free_range_before(
const FieldType& node, index_t index, extent_len_t node_size) {
assert(index <= node.num_keys);
extent_len_t offset_start = node.get_key_start_offset(index, node_size);
extent_len_t offset_end = node.get_item_end_offset(index, node_size);
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
if (node.is_level_tail() && index == node.num_keys) {
offset_end -= sizeof(laddr_t);
}
}
assert(offset_start <= offset_end);
assert(offset_end - offset_start < node_size);
return {offset_start, offset_end};
}
/**
 * _node_fields_013_t (node_fields_0_t, node_fields_1_t, leaf_fields_3_t)
*
* The STAGE_LEFT layout implementation for node N0/N1, or the STAGE_RIGHT
* layout implementation for leaf node N3.
*
* The node layout storing n slots:
*
* # <----------------------------- node range --------------------------------------> #
* # #<~># free space #
* # <----- left part -----------------------------> # <~# <----- right slots -------> #
* # # <---- left slots -------------> #~> # #
* # # slots [2, n) |<~># #<~>| right slots [2, n) #
* # # <- slot 0 -> | <- slot 1 -> | # # | <-- s1 --> | <-- s0 --> #
* # # | | # # | | #
* # | num_ # | right | | right | # # | next-stage | next-stage #
* # header | keys # key | offset | key | offset | # # | container | container #
* # | # 0 | 0 | 1 | 1 |...#...#...| or onode 1 | or onode 0 #
* | | ^ ^
* | | | |
* | +----------------+ |
* +--------------------------------------------+
*/
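/*
 * Offset arithmetic sketch (illustrative restatement of the accessors below;
 * no new behavior is implied):
 *
 *   // the left part grows rightwards:
 *   //   get_key_start_offset(i) = HEADER_SIZE + i * sizeof(SlotType)
 *   // the right part grows leftwards, anchored at the node end:
 *   //   get_item_end_offset(0)  = node_size
 *   //   get_item_end_offset(i)  = slots[i - 1].right_offset
 *   // the free space for one more insertion lives in between:
 *   //   free_size_before(num_keys) =
 *   //       get_item_end_offset(num_keys) - get_key_start_offset(num_keys)
 */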
template <typename SlotType>
struct _node_fields_013_t {
// should be enough to index all keys under 64 KiB node
using num_keys_t = uint16_t;
using key_t = typename SlotType::key_t;
using key_get_type = const key_t&;
using me_t = _node_fields_013_t<SlotType>;
static constexpr field_type_t FIELD_TYPE = SlotType::FIELD_TYPE;
static constexpr node_offset_t HEADER_SIZE =
sizeof(node_header_t) + sizeof(num_keys_t);
static constexpr node_offset_t ITEM_OVERHEAD = SlotType::OVERHEAD;
bool is_level_tail() const { return header.get_is_level_tail(); }
extent_len_t total_size(extent_len_t node_size) const {
return node_size;
}
key_get_type get_key(
index_t index, extent_len_t node_size) const {
assert(index < num_keys);
return slots[index].key;
}
node_offset_t get_key_start_offset(
index_t index, extent_len_t node_size) const {
assert(index <= num_keys);
auto offset = HEADER_SIZE + sizeof(SlotType) * index;
assert(offset < node_size);
return offset;
}
node_offset_t get_item_start_offset(
index_t index, extent_len_t node_size) const {
assert(index < num_keys);
auto offset = slots[index].right_offset;
assert(offset < node_size);
return offset;
}
const void* p_offset(index_t index) const {
assert(index < num_keys);
return &slots[index].right_offset;
}
extent_len_t get_item_end_offset(
index_t index, extent_len_t node_size) const {
return index == 0 ? node_size
: get_item_start_offset(index - 1, node_size);
}
template <node_type_t NODE_TYPE>
node_offset_t free_size_before(
index_t index, extent_len_t node_size) const {
auto range = fields_free_range_before<NODE_TYPE>(*this, index, node_size);
return range.end - range.start;
}
static node_offset_t estimate_insert_one() { return sizeof(SlotType); }
template <IsFullKey Key>
static void insert_at(
NodeExtentMutable&, const Key& key,
const me_t& node, index_t index, node_offset_t size_right);
static node_offset_t erase_at(NodeExtentMutable&, const me_t&, index_t, const char*);
static void update_size_at(
NodeExtentMutable&, const me_t& node, index_t index, int change);
static void append_key(
NodeExtentMutable&, const key_t& key, char*& p_append);
template <IsFullKey Key>
static void append_key(
NodeExtentMutable& mut, const Key& key, char*& p_append) {
append_key(mut, key_t::from_key(key), p_append);
}
static void append_offset(
NodeExtentMutable& mut, node_offset_t offset_to_right, char*& p_append);
node_header_t header;
num_keys_t num_keys = 0u;
SlotType slots[];
} __attribute__((packed));
using node_fields_0_t = _node_fields_013_t<slot_0_t>;
using node_fields_1_t = _node_fields_013_t<slot_1_t>;
/**
* node_fields_2_t
*
* The STAGE_STRING layout implementation for node N2.
*
* The node layout storing n slots:
*
* # <--------------------------------- node range ----------------------------------------> #
* # #<~># free space #
* # <------- left part ---------------> # <~# <--------- right slots ---------------------> #
* # # <---- offsets ----> #~> #<~>| slots [2, n) #
* # # offsets [2, n) |<~># # | <----- slot 1 ----> | <----- slot 0 ----> #
* # # | # # | | #
* # | num_ # offset | offset | # # | next-stage | ns-oid | next-stage | ns-oid #
* # header | keys # 0 | 1 |...#...#...| container1 | 1 | container0 | 0 #
* | | ^ ^
* | | | |
* | +----------------+ |
* +-----------------------------------------------+
*/
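/*
 * Key lookup sketch (illustrative): unlike _node_fields_013_t there is no
 * fixed-size key in the left part -- the ns-oid key sits at the tail of its
 * right-side slot, so get_key() rebuilds it from the recorded offset:
 *
 *   //   item_end = (index == 0) ? node_size : offsets[index - 1]
 *   //   key      = ns_oid_view_t(fields_start(*this) + item_end)
 */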
struct node_fields_2_t {
// should be enough to index all keys under 64 KiB node
using num_keys_t = uint16_t;
using key_t = ns_oid_view_t;
using key_get_type = key_t;
static constexpr field_type_t FIELD_TYPE = field_type_t::N2;
static constexpr node_offset_t HEADER_SIZE =
sizeof(node_header_t) + sizeof(num_keys_t);
static constexpr node_offset_t ITEM_OVERHEAD = sizeof(node_offset_t);
bool is_level_tail() const { return header.get_is_level_tail(); }
extent_len_t total_size(extent_len_t node_size) const {
return node_size;
}
key_get_type get_key(
index_t index, extent_len_t node_size) const {
assert(index < num_keys);
auto item_end_offset = get_item_end_offset(index, node_size);
const char* p_start = fields_start(*this);
return key_t(p_start + item_end_offset);
}
node_offset_t get_key_start_offset(
index_t index, extent_len_t node_size) const {
assert(index <= num_keys);
auto offset = HEADER_SIZE + sizeof(node_offset_t) * num_keys;
assert(offset < node_size);
return offset;
}
node_offset_t get_item_start_offset(
index_t index, extent_len_t node_size) const {
assert(index < num_keys);
auto offset = offsets[index];
assert(offset < node_size);
return offset;
}
const void* p_offset(index_t index) const {
assert(index < num_keys);
return &offsets[index];
}
extent_len_t get_item_end_offset(
index_t index, extent_len_t node_size) const {
return index == 0 ? node_size
: get_item_start_offset(index - 1, node_size);
}
template <node_type_t NODE_TYPE>
node_offset_t free_size_before(
index_t index, extent_len_t node_size) const {
auto range = fields_free_range_before<NODE_TYPE>(*this, index, node_size);
return range.end - range.start;
}
static node_offset_t estimate_insert_one() { return sizeof(node_offset_t); }
template <IsFullKey Key>
static void insert_at(
NodeExtentMutable& mut, const Key& key,
const node_fields_2_t& node, index_t index, node_offset_t size_right) {
ceph_abort("not implemented");
}
static void update_size_at(
NodeExtentMutable& mut, const node_fields_2_t& node, index_t index, int change) {
ceph_abort("not implemented");
}
static void append_key(
NodeExtentMutable& mut, const key_t& key, char*& p_append) {
ns_oid_view_t::append(mut, key, p_append);
}
template <IsFullKey Key>
static void append_key(
NodeExtentMutable& mut, const Key& key, char*& p_append) {
ns_oid_view_t::append(mut, key, p_append);
}
static void append_offset(
NodeExtentMutable& mut, node_offset_t offset_to_right, char*& p_append);
node_header_t header;
num_keys_t num_keys = 0u;
node_offset_t offsets[];
} __attribute__((packed));
/**
* internal_fields_3_t
*
 * The STAGE_RIGHT layout implementation for internal node N3.
*
* The node layout storing 3 children:
*
* # <---------------- node range ---------------------------> #
* # # <-- keys ---> # <---- laddrs -----------> #
* # free space: # |<~># |<~>#
* # # | # | #
* # | num_ # key | key | # laddr | laddr | laddr | #
* # header | keys # 0 | 1 |...# 0 | 1 | 2 |...#
*/
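/*
 * Child address lookup sketch (illustrative): keys and laddrs form two
 * parallel arrays, and the laddr array starts where the key array would end
 * at its capacity limit:
 *
 *   //   capacity = (node_size - HEADER_SIZE) / ITEM_SIZE
 *   //   p_addrs  = (const laddr_packed_t*)&keys[capacity]
 *   //   child(i) = p_addrs[i]   // i == num_keys is valid at level tail
 */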
struct internal_fields_3_t {
using key_get_type = const snap_gen_t&;
// should be enough to index all keys under 64 KiB node
using num_keys_t = uint16_t;
static constexpr field_type_t FIELD_TYPE = field_type_t::N3;
static constexpr node_offset_t HEADER_SIZE =
sizeof(node_header_t) + sizeof(num_keys_t);
static constexpr node_offset_t ITEM_SIZE =
sizeof(snap_gen_t) + sizeof(laddr_t);
static constexpr node_offset_t ITEM_OVERHEAD = 0u;
bool is_level_tail() const { return header.get_is_level_tail(); }
extent_len_t total_size(extent_len_t node_size) const {
if (is_level_tail()) {
return node_size - sizeof(snap_gen_t);
} else {
return node_size;
}
}
key_get_type get_key(
index_t index, extent_len_t node_size) const {
assert(index < num_keys);
return keys[index];
}
template <node_type_t NODE_TYPE>
std::enable_if_t<NODE_TYPE == node_type_t::INTERNAL, node_offset_t>
free_size_before(index_t index, extent_len_t node_size) const {
assert(index <= num_keys);
assert(num_keys <= get_max_num_keys(node_size));
extent_len_t free = total_size(node_size) - HEADER_SIZE -
index * ITEM_SIZE;
if (is_level_tail() && index == num_keys) {
free -= sizeof(laddr_t);
}
return free;
}
const laddr_packed_t* get_p_child_addr(
index_t index, extent_len_t node_size) const {
#ifndef NDEBUG
if (is_level_tail()) {
assert(index <= num_keys);
} else {
assert(index < num_keys);
}
#endif
auto p_addrs = reinterpret_cast<const laddr_packed_t*>(
&keys[get_num_keys_limit(node_size)]);
auto ret = p_addrs + index;
assert((const char*)ret < fields_start(*this) + node_size);
return ret;
}
static node_offset_t estimate_insert_one() { return ITEM_SIZE; }
template <IsFullKey Key>
static void insert_at(
NodeExtentMutable& mut, const Key& key,
const internal_fields_3_t& node,
index_t index, node_offset_t size_right) {
ceph_abort("not implemented");
}
static void update_size_at(
NodeExtentMutable& mut, const internal_fields_3_t& node,
index_t index, int change) {
ceph_abort("not implemented");
}
node_header_t header;
num_keys_t num_keys = 0u;
snap_gen_t keys[];
private:
num_keys_t get_max_num_keys(extent_len_t node_size) const {
auto num_limit = get_num_keys_limit(node_size);
return (is_level_tail() ? num_limit - 1 : num_limit);
}
static num_keys_t get_num_keys_limit(extent_len_t node_size) {
return (node_size - HEADER_SIZE) / ITEM_SIZE;
}
} __attribute__((packed));
using leaf_fields_3_t = _node_fields_013_t<slot_3_t>;
}
| 14,889 | 35.584767 | 94 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/stages/stage_types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cassert>
#include <optional>
#include <ostream>
#include "crimson/os/seastore/onode_manager/staged-fltree/fwd.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_types.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/value.h"
namespace crimson::os::seastore::onode {
using match_stage_t = int8_t;
constexpr match_stage_t STAGE_LEFT = 2; // shard/pool/crush
constexpr match_stage_t STAGE_STRING = 1; // nspace/oid
constexpr match_stage_t STAGE_RIGHT = 0; // snap/gen
constexpr auto STAGE_TOP = STAGE_LEFT;
constexpr auto STAGE_BOTTOM = STAGE_RIGHT;
constexpr bool is_valid_stage(match_stage_t stage) {
return std::clamp(stage, STAGE_BOTTOM, STAGE_TOP) == stage;
}
// TODO: replace by
// using match_history_t = int8_t;
// left_m, str_m, right_m
// 3: GT,
// 2: EQ, GT,
// 1: EQ, EQ, GT
// 0: EQ, EQ, EQ
// -1: EQ, EQ, LT
// -2: EQ, LT,
// -3: LT,
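// A worked example of the proposed encoding (an assumption mirroring the
// table above; it is not implemented yet): a lookup whose shard/pool/crush
// compares EQ but whose ns/oid compares GT would encode as +2, while the
// current MatchHistory below stores the same outcome as
// {left_match=EQ, string_match=GT, right_match=nullopt}.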
struct MatchHistory {
template <match_stage_t STAGE>
const std::optional<MatchKindCMP>& get() const {
static_assert(is_valid_stage(STAGE));
if constexpr (STAGE == STAGE_RIGHT) {
return right_match;
} else if (STAGE == STAGE_STRING) {
return string_match;
} else {
return left_match;
}
}
const std::optional<MatchKindCMP>&
get_by_stage(match_stage_t stage) const {
assert(is_valid_stage(stage));
if (stage == STAGE_RIGHT) {
return right_match;
} else if (stage == STAGE_STRING) {
return string_match;
} else {
return left_match;
}
}
template <match_stage_t STAGE = STAGE_TOP>
const bool is_GT() const;
template <match_stage_t STAGE>
void set(MatchKindCMP match) {
static_assert(is_valid_stage(STAGE));
if constexpr (STAGE < STAGE_TOP) {
assert(*get<STAGE + 1>() == MatchKindCMP::EQ);
}
assert(!get<STAGE>().has_value() || *get<STAGE>() != MatchKindCMP::EQ);
const_cast<std::optional<MatchKindCMP>&>(get<STAGE>()) = match;
}
std::ostream& dump(std::ostream& os) const {
os << "history(";
dump_each(os, left_match) << ", ";
dump_each(os, string_match) << ", ";
dump_each(os, right_match) << ")";
return os;
}
std::ostream& dump_each(
std::ostream& os, const std::optional<MatchKindCMP>& match) const {
if (!match.has_value()) {
return os << "--";
} else if (*match == MatchKindCMP::LT) {
return os << "LT";
} else if (*match == MatchKindCMP::EQ) {
return os << "EQ";
} else if (*match == MatchKindCMP::GT) {
return os << "GT";
} else {
ceph_abort("impossble path");
}
}
std::optional<MatchKindCMP> left_match;
std::optional<MatchKindCMP> string_match;
std::optional<MatchKindCMP> right_match;
};
inline std::ostream& operator<<(std::ostream& os, const MatchHistory& pos) {
return pos.dump(os);
}
template <match_stage_t STAGE>
struct _check_GT_t {
static bool eval(const MatchHistory* history) {
return history->get<STAGE>() &&
(*history->get<STAGE>() == MatchKindCMP::GT ||
(*history->get<STAGE>() == MatchKindCMP::EQ &&
_check_GT_t<STAGE - 1>::eval(history)));
}
};
template <>
struct _check_GT_t<STAGE_RIGHT> {
static bool eval(const MatchHistory* history) {
return history->get<STAGE_RIGHT>() &&
*history->get<STAGE_RIGHT>() == MatchKindCMP::GT;
}
};
template <match_stage_t STAGE>
const bool MatchHistory::is_GT() const {
static_assert(is_valid_stage(STAGE));
if constexpr (STAGE < STAGE_TOP) {
assert(get<STAGE + 1>() == MatchKindCMP::EQ);
}
return _check_GT_t<STAGE>::eval(this);
}
template <match_stage_t STAGE>
struct staged_position_t {
static_assert(is_valid_stage(STAGE));
using me_t = staged_position_t<STAGE>;
using nxt_t = staged_position_t<STAGE - 1>;
bool is_end() const {
if (index == INDEX_END) {
return true;
} else {
assert(is_valid_index(index));
return false;
}
}
index_t& index_by_stage(match_stage_t stage) {
assert(stage <= STAGE);
if (STAGE == stage) {
return index;
} else {
return nxt.index_by_stage(stage);
}
}
auto operator<=>(const me_t& o) const = default;
void assert_next_to(const me_t& prv) const {
#ifndef NDEBUG
if (is_end()) {
assert(!prv.is_end());
} else if (index == prv.index) {
assert(!nxt.is_end());
nxt.assert_next_to(prv.nxt);
} else if (index == prv.index + 1) {
assert(!prv.nxt.is_end());
assert(nxt == nxt_t::begin());
} else {
assert(false);
}
#endif
}
me_t& operator-=(const me_t& o) {
assert(is_valid_index(o.index));
assert(index >= o.index);
if (index != INDEX_END) {
assert(is_valid_index(index));
index -= o.index;
if (index == 0) {
nxt -= o.nxt;
}
}
return *this;
}
me_t& operator+=(const me_t& o) {
assert(is_valid_index(index));
assert(is_valid_index(o.index));
index += o.index;
nxt += o.nxt;
return *this;
}
void encode(ceph::bufferlist& encoded) const {
ceph::encode(index, encoded);
nxt.encode(encoded);
}
static me_t decode(ceph::bufferlist::const_iterator& delta) {
me_t ret;
ceph::decode(ret.index, delta);
ret.nxt = nxt_t::decode(delta);
return ret;
}
static me_t begin() { return {0u, nxt_t::begin()}; }
static me_t end() {
return {INDEX_END, nxt_t::end()};
}
index_t index;
nxt_t nxt;
};
template <match_stage_t STAGE>
std::ostream& operator<<(std::ostream& os, const staged_position_t<STAGE>& pos) {
if (pos.index == INDEX_END) {
os << "END";
} else if (pos.index == INDEX_LAST) {
os << "LAST";
} else {
os << pos.index;
assert(is_valid_index(pos.index));
}
return os << ", " << pos.nxt;
}
template <>
struct staged_position_t<STAGE_BOTTOM> {
using me_t = staged_position_t<STAGE_BOTTOM>;
bool is_end() const {
if (index == INDEX_END) {
return true;
} else {
assert(is_valid_index(index));
return false;
}
}
index_t& index_by_stage(match_stage_t stage) {
assert(stage == STAGE_BOTTOM);
return index;
}
auto operator<=>(const me_t&) const = default;
me_t& operator-=(const me_t& o) {
assert(is_valid_index(o.index));
assert(index >= o.index);
if (index != INDEX_END) {
assert(is_valid_index(index));
index -= o.index;
}
return *this;
}
me_t& operator+=(const me_t& o) {
assert(is_valid_index(index));
assert(is_valid_index(o.index));
index += o.index;
return *this;
}
void assert_next_to(const me_t& prv) const {
#ifndef NDEBUG
if (is_end()) {
assert(!prv.is_end());
} else {
assert(index == prv.index + 1);
}
#endif
}
void encode(ceph::bufferlist& encoded) const {
ceph::encode(index, encoded);
}
static me_t decode(ceph::bufferlist::const_iterator& delta) {
me_t ret;
ceph::decode(ret.index, delta);
return ret;
}
static me_t begin() { return {0u}; }
static me_t end() { return {INDEX_END}; }
index_t index;
};
template <>
inline std::ostream& operator<<(std::ostream& os, const staged_position_t<STAGE_BOTTOM>& pos) {
if (pos.index == INDEX_END) {
os << "END";
} else if (pos.index == INDEX_LAST) {
os << "LAST";
} else {
os << pos.index;
assert(is_valid_index(pos.index));
}
return os;
}
using search_position_t = staged_position_t<STAGE_TOP>;
template <match_stage_t STAGE>
const staged_position_t<STAGE>& cast_down(const search_position_t& pos) {
if constexpr (STAGE == STAGE_LEFT) {
return pos;
} else if constexpr (STAGE == STAGE_STRING) {
#ifndef NDEBUG
if (pos.is_end()) {
assert(pos.nxt.is_end());
} else {
assert(pos.index == 0u);
}
#endif
return pos.nxt;
} else if constexpr (STAGE == STAGE_RIGHT) {
#ifndef NDEBUG
if (pos.is_end()) {
assert(pos.nxt.nxt.is_end());
} else {
assert(pos.index == 0u);
assert(pos.nxt.index == 0u);
}
#endif
return pos.nxt.nxt;
} else {
ceph_abort("impossible path");
}
}
template <match_stage_t STAGE>
staged_position_t<STAGE>& cast_down(search_position_t& pos) {
const search_position_t& _pos = pos;
return const_cast<staged_position_t<STAGE>&>(cast_down<STAGE>(_pos));
}
template <match_stage_t STAGE>
staged_position_t<STAGE>& cast_down_fill_0(search_position_t& pos) {
if constexpr (STAGE == STAGE_LEFT) {
return pos;
} if constexpr (STAGE == STAGE_STRING) {
pos.index = 0;
return pos.nxt;
} else if constexpr (STAGE == STAGE_RIGHT) {
pos.index = 0;
pos.nxt.index = 0;
return pos.nxt.nxt;
} else {
ceph_abort("impossible path");
}
}
inline search_position_t&& normalize(search_position_t&& pos) { return std::move(pos); }
template <match_stage_t STAGE, typename = std::enable_if_t<STAGE != STAGE_TOP>>
search_position_t normalize(staged_position_t<STAGE>&& pos) {
if (pos.is_end()) {
return search_position_t::end();
}
if constexpr (STAGE == STAGE_STRING) {
return {0u, std::move(pos)};
} else if (STAGE == STAGE_RIGHT) {
return {0u, {0u, std::move(pos)}};
} else {
ceph_abort("impossible path");
}
}
struct memory_range_t {
const char* p_start;
const char* p_end;
};
struct container_range_t {
memory_range_t range;
extent_len_t node_size;
};
enum class ContainerType { ITERATIVE, INDEXABLE };
// the input type to construct the value during insert.
template <node_type_t> struct value_input_type;
template<> struct value_input_type<node_type_t::INTERNAL> { using type = laddr_t; };
template<> struct value_input_type<node_type_t::LEAF> { using type = value_config_t; };
template <node_type_t NODE_TYPE>
using value_input_type_t = typename value_input_type<NODE_TYPE>::type;
template <node_type_t> struct value_type;
template<> struct value_type<node_type_t::INTERNAL> { using type = laddr_packed_t; };
template<> struct value_type<node_type_t::LEAF> { using type = value_header_t; };
template <node_type_t NODE_TYPE>
using value_type_t = typename value_type<NODE_TYPE>::type;
template <node_type_t NODE_TYPE, match_stage_t STAGE>
struct staged_result_t {
using me_t = staged_result_t<NODE_TYPE, STAGE>;
bool is_end() const { return position.is_end(); }
static me_t end() {
return {staged_position_t<STAGE>::end(), nullptr, MSTAT_END};
}
template <typename T = me_t>
static std::enable_if_t<STAGE != STAGE_BOTTOM, T> from_nxt(
index_t index, const staged_result_t<NODE_TYPE, STAGE - 1>& nxt_stage_result) {
return {{index, nxt_stage_result.position},
nxt_stage_result.p_value,
nxt_stage_result.mstat};
}
staged_position_t<STAGE> position;
const value_type_t<NODE_TYPE>* p_value;
match_stat_t mstat;
};
template <node_type_t NODE_TYPE>
using lookup_result_t = staged_result_t<NODE_TYPE, STAGE_TOP>;
template <node_type_t NODE_TYPE>
lookup_result_t<NODE_TYPE>&& normalize(
lookup_result_t<NODE_TYPE>&& result) { return std::move(result); }
template <node_type_t NODE_TYPE, match_stage_t STAGE,
typename = std::enable_if_t<STAGE != STAGE_TOP>>
lookup_result_t<NODE_TYPE> normalize(
staged_result_t<NODE_TYPE, STAGE>&& result) {
// FIXME: assert result.mstat correct
return {normalize(std::move(result.position)), result.p_value, result.mstat};
}
struct node_stats_t {
size_t size_persistent = 0;
size_t size_filled = 0;
// filled by staged::get_stats()
size_t size_logical = 0;
size_t size_overhead = 0;
size_t size_value = 0;
unsigned num_kvs = 0;
};
}
#if FMT_VERSION >= 90000
template <crimson::os::seastore::onode::match_stage_t S>
struct fmt::formatter<crimson::os::seastore::onode::staged_position_t<S>> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::onode::MatchHistory> : fmt::ostream_formatter {};
#endif
| 11,976 | 26.036117 | 106 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/stages/sub_items_stage.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <variant>
#include "crimson/os/seastore/onode_manager/staged-fltree/node_types.h"
#include "key_layout.h"
#include "stage_types.h"
namespace crimson::os::seastore::onode {
class NodeExtentMutable;
struct internal_sub_item_t {
const snap_gen_t& get_key() const { return key; }
const laddr_packed_t* get_p_value() const { return &value; }
snap_gen_t key;
laddr_packed_t value;
} __attribute__((packed));
/**
* internal_sub_items_t
*
* The STAGE_RIGHT implementation for internal node N0/N1/N2, implements staged
* contract as an indexable container to index snap-gen to child node
* addresses.
*
 * The layout of the container storing n sub-items:
*
* # <--------- container range -----------> #
* #<~># sub-items [2, n) #
* # # <- sub-item 1 -> # <- sub-item 0 -> #
* #...# snap-gen | laddr # snap-gen | laddr #
* ^
* |
* p_first_item +
*/
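/*
 * Indexing sketch (illustrative only; assumes `range` is a valid
 * container_range_t over a populated STAGE_RIGHT container of an internal
 * node):
 *
 *   internal_sub_items_t sub_items(range);
 *   for (index_t i = 0; i < sub_items.keys(); ++i) {
 *     const snap_gen_t& key = sub_items[i];                      // snap-gen key
 *     const laddr_packed_t* p_child = sub_items.get_p_value(i);  // child addr
 *   }
 */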
class internal_sub_items_t {
public:
using num_keys_t = index_t;
internal_sub_items_t(const container_range_t& _range)
: node_size{_range.node_size} {
assert(is_valid_node_size(node_size));
auto& range = _range.range;
assert(range.p_start < range.p_end);
assert((range.p_end - range.p_start) % sizeof(internal_sub_item_t) == 0);
num_items = (range.p_end - range.p_start) / sizeof(internal_sub_item_t);
assert(num_items > 0);
auto _p_first_item = range.p_end - sizeof(internal_sub_item_t);
p_first_item = reinterpret_cast<const internal_sub_item_t*>(_p_first_item);
}
// container type system
using key_get_type = const snap_gen_t&;
static constexpr auto CONTAINER_TYPE = ContainerType::INDEXABLE;
num_keys_t keys() const { return num_items; }
key_get_type operator[](index_t index) const {
assert(index < num_items);
return (p_first_item - index)->get_key();
}
node_offset_t size_before(index_t index) const {
size_t ret = index * sizeof(internal_sub_item_t);
assert(ret < node_size);
return ret;
}
const laddr_packed_t* get_p_value(index_t index) const {
assert(index < num_items);
return (p_first_item - index)->get_p_value();
}
node_offset_t size_overhead_at(index_t index) const { return 0u; }
void encode(const char* p_node_start, ceph::bufferlist& encoded) const {
auto p_end = reinterpret_cast<const char*>(p_first_item) +
sizeof(internal_sub_item_t);
auto p_start = p_end - num_items * sizeof(internal_sub_item_t);
int start_offset = p_start - p_node_start;
int stage_size = p_end - p_start;
assert(start_offset > 0);
assert(stage_size > 0);
assert(start_offset + stage_size < (int)node_size);
ceph::encode(static_cast<node_offset_t>(start_offset), encoded);
ceph::encode(static_cast<node_offset_t>(stage_size), encoded);
}
static internal_sub_items_t decode(
const char* p_node_start,
extent_len_t node_size,
ceph::bufferlist::const_iterator& delta) {
node_offset_t start_offset;
ceph::decode(start_offset, delta);
node_offset_t stage_size;
ceph::decode(stage_size, delta);
assert(start_offset > 0);
assert(stage_size > 0);
assert((unsigned)start_offset + stage_size < node_size);
return internal_sub_items_t({{p_node_start + start_offset,
p_node_start + start_offset + stage_size},
node_size});
}
static node_offset_t header_size() { return 0u; }
template <IsFullKey Key>
static node_offset_t estimate_insert(
const Key&, const laddr_t&) {
return sizeof(internal_sub_item_t);
}
template <IsFullKey Key>
static const laddr_packed_t* insert_at(
NodeExtentMutable&, const internal_sub_items_t&,
const Key&, const laddr_t&,
index_t index, node_offset_t size, const char* p_left_bound);
static node_offset_t trim_until(NodeExtentMutable&, internal_sub_items_t&, index_t);
static node_offset_t erase_at(
NodeExtentMutable&, const internal_sub_items_t&, index_t, const char*);
template <KeyT KT>
class Appender;
private:
extent_len_t node_size;
index_t num_items;
const internal_sub_item_t* p_first_item;
};
template <KeyT KT>
class internal_sub_items_t::Appender {
public:
Appender(NodeExtentMutable* p_mut, char* p_append)
: p_mut{p_mut}, p_append{p_append} {}
Appender(NodeExtentMutable* p_mut, const internal_sub_items_t& sub_items)
: p_mut{p_mut},
p_append{(char*)(sub_items.p_first_item + 1 - sub_items.keys())} {
assert(sub_items.keys());
}
void append(const internal_sub_items_t& src, index_t from, index_t items);
void append(const full_key_t<KT>&, const laddr_t&, const laddr_packed_t*&);
char* wrap() { return p_append; }
private:
NodeExtentMutable* p_mut;
char* p_append;
};
/**
* leaf_sub_items_t
*
* The STAGE_RIGHT implementation for leaf node N0/N1/N2, implements staged
* contract as an indexable container to index snap-gen to value_header_t.
*
 * The layout of the container storing n sub-items:
*
* # <------------------------ container range -------------------------------> #
* # <---------- sub-items ----------------> # <--- offsets ---------# #
* #<~># sub-items [2, n) #<~>| offsets [2, n) # #
* # # <- sub-item 1 -> # <- sub-item 0 -> # | # #
* #...# snap-gen | value # snap-gen | value #...| offset1 | offset0 # num_keys #
* ^ ^ ^
* | | |
* p_items_end + p_offsets + |
* p_num_keys +
*/
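/*
 * Offset walk sketch (illustrative restatement of the constructor below):
 * the container is parsed from its right end -- num_keys first, then the
 * offset array, then the items:
 *
 *   //   p_num_keys  = range.p_end - sizeof(num_keys_t)
 *   //   p_offsets   = p_num_keys - sizeof(node_offset_t)   // offset 0
 *   //   p_items_end = &get_offset(keys() - 1)              // leftmost offset
 *   //   item i spans [p_items_end - offset(i), p_items_end - offset_to_end(i))
 */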
class leaf_sub_items_t {
public:
// should be enough to index all keys under 64 KiB node
using num_keys_t = uint16_t;
// TODO: remove if num_keys_t is aligned
struct num_keys_packed_t {
num_keys_t value;
} __attribute__((packed));
leaf_sub_items_t(const container_range_t& _range)
: node_size{_range.node_size} {
assert(is_valid_node_size(node_size));
auto& range = _range.range;
assert(range.p_start < range.p_end);
auto _p_num_keys = range.p_end - sizeof(num_keys_t);
assert(range.p_start < _p_num_keys);
p_num_keys = reinterpret_cast<const num_keys_packed_t*>(_p_num_keys);
assert(keys());
auto _p_offsets = _p_num_keys - sizeof(node_offset_t);
assert(range.p_start < _p_offsets);
p_offsets = reinterpret_cast<const node_offset_packed_t*>(_p_offsets);
p_items_end = reinterpret_cast<const char*>(&get_offset(keys() - 1));
assert(range.p_start < p_items_end);
assert(range.p_start == p_start());
}
bool operator==(const leaf_sub_items_t& x) {
return (p_num_keys == x.p_num_keys &&
p_offsets == x.p_offsets &&
p_items_end == x.p_items_end);
}
const char* p_start() const { return get_item_end(keys()); }
const node_offset_packed_t& get_offset(index_t index) const {
assert(index < keys());
return *(p_offsets - index);
}
const node_offset_t get_offset_to_end(index_t index) const {
assert(index <= keys());
return index == 0 ? 0 : get_offset(index - 1).value;
}
const char* get_item_start(index_t index) const {
return p_items_end - get_offset(index).value;
}
const char* get_item_end(index_t index) const {
return p_items_end - get_offset_to_end(index);
}
// container type system
using key_get_type = const snap_gen_t&;
static constexpr auto CONTAINER_TYPE = ContainerType::INDEXABLE;
num_keys_t keys() const { return p_num_keys->value; }
key_get_type operator[](index_t index) const {
assert(index < keys());
auto pointer = get_item_end(index);
assert(get_item_start(index) < pointer);
pointer -= sizeof(snap_gen_t);
assert(get_item_start(index) < pointer);
return *reinterpret_cast<const snap_gen_t*>(pointer);
}
node_offset_t size_before(index_t index) const {
assert(index <= keys());
size_t ret;
if (index == 0) {
ret = sizeof(num_keys_t);
} else {
--index;
ret = sizeof(num_keys_t) +
(index + 1) * sizeof(node_offset_t) +
get_offset(index).value;
}
assert(ret < node_size);
return ret;
}
node_offset_t size_overhead_at(index_t index) const { return sizeof(node_offset_t); }
const value_header_t* get_p_value(index_t index) const {
assert(index < keys());
auto pointer = get_item_start(index);
auto value = reinterpret_cast<const value_header_t*>(pointer);
assert(pointer + value->allocation_size() + sizeof(snap_gen_t) ==
get_item_end(index));
return value;
}
void encode(const char* p_node_start, ceph::bufferlist& encoded) const {
auto p_end = reinterpret_cast<const char*>(p_num_keys) +
sizeof(num_keys_t);
int start_offset = p_start() - p_node_start;
int stage_size = p_end - p_start();
assert(start_offset > 0);
assert(stage_size > 0);
assert(start_offset + stage_size < (int)node_size);
ceph::encode(static_cast<node_offset_t>(start_offset), encoded);
ceph::encode(static_cast<node_offset_t>(stage_size), encoded);
}
static leaf_sub_items_t decode(
const char* p_node_start,
extent_len_t node_size,
ceph::bufferlist::const_iterator& delta) {
node_offset_t start_offset;
ceph::decode(start_offset, delta);
node_offset_t stage_size;
ceph::decode(stage_size, delta);
assert(start_offset > 0);
assert(stage_size > 0);
assert((unsigned)start_offset + stage_size < node_size);
return leaf_sub_items_t({{p_node_start + start_offset,
p_node_start + start_offset + stage_size},
node_size});
}
static node_offset_t header_size() { return sizeof(num_keys_t); }
template <IsFullKey Key>
static node_offset_t estimate_insert(
const Key&, const value_config_t& value) {
return value.allocation_size() + sizeof(snap_gen_t) + sizeof(node_offset_t);
}
template <IsFullKey Key>
static const value_header_t* insert_at(
NodeExtentMutable&, const leaf_sub_items_t&,
const Key&, const value_config_t&,
index_t index, node_offset_t size, const char* p_left_bound);
static node_offset_t trim_until(NodeExtentMutable&, leaf_sub_items_t&, index_t index);
static node_offset_t erase_at(
NodeExtentMutable&, const leaf_sub_items_t&, index_t, const char*);
template <KeyT KT>
class Appender;
private:
extent_len_t node_size;
const num_keys_packed_t* p_num_keys;
const node_offset_packed_t* p_offsets;
const char* p_items_end;
};
constexpr index_t APPENDER_LIMIT = 3u;
template <KeyT KT>
class leaf_sub_items_t::Appender {
struct range_items_t {
index_t from;
index_t items;
};
struct kv_item_t {
const full_key_t<KT>* p_key;
value_config_t value_config;
};
using var_t = std::variant<range_items_t, kv_item_t>;
public:
Appender(NodeExtentMutable* p_mut, char* p_append)
: p_mut{p_mut}, p_append{p_append} {
}
Appender(NodeExtentMutable* p_mut, const leaf_sub_items_t& sub_items)
    : p_mut{p_mut}, op_dst(sub_items) {
assert(sub_items.keys());
}
void append(const leaf_sub_items_t& src, index_t from, index_t items);
void append(const full_key_t<KT>& key,
const value_config_t& value, const value_header_t*& p_value) {
// append from empty
assert(p_append);
assert(pp_value == nullptr);
assert(cnt <= APPENDER_LIMIT);
appends[cnt] = kv_item_t{&key, value};
++cnt;
pp_value = &p_value;
}
char* wrap();
private:
NodeExtentMutable* p_mut;
// append from empty
std::optional<leaf_sub_items_t> op_src;
const value_header_t** pp_value = nullptr;
char* p_append = nullptr;
var_t appends[APPENDER_LIMIT];
index_t cnt = 0;
// append from existing
std::optional<leaf_sub_items_t> op_dst;
char* p_appended = nullptr;
};
template <node_type_t> struct _sub_items_t;
template<> struct _sub_items_t<node_type_t::INTERNAL> { using type = internal_sub_items_t; };
template<> struct _sub_items_t<node_type_t::LEAF> { using type = leaf_sub_items_t; };
template <node_type_t NODE_TYPE>
using sub_items_t = typename _sub_items_t<NODE_TYPE>::type;
}
| 12,455 | 32.756098 | 93 | h |
null | ceph-main/src/crimson/os/seastore/random_block_manager/avlallocator.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include "extent_allocator.h"
#include "include/ceph_assert.h"
#include "include/buffer_fwd.h"
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/transaction.h"
#include <string.h>
#include "include/buffer.h"
#include <boost/intrusive/avl_set.hpp>
#include <optional>
#include <vector>
namespace crimson::os::seastore {
struct extent_range_t {
rbm_abs_addr start;
rbm_abs_addr end;
extent_range_t(rbm_abs_addr start, rbm_abs_addr end) :
start(start), end(end)
{}
struct before_t {
template<typename KeyLeft, typename KeyRight>
bool operator()(const KeyLeft& lhs, const KeyRight& rhs) const {
return lhs.end <= rhs.start;
}
};
boost::intrusive::avl_set_member_hook<> offset_hook;
struct shorter_t {
template<typename KeyType>
bool operator()(const extent_range_t& lhs, const KeyType& rhs) const {
auto lhs_size = lhs.length();
auto rhs_size = rhs.end - rhs.start;
if (lhs_size < rhs_size) {
return true;
} else if (lhs_size > rhs_size) {
return false;
} else {
return lhs.start < rhs.start;
}
}
};
size_t length() const {
return end - start;
}
boost::intrusive::avl_set_member_hook<> size_hook;
};
/*
 * This is the simplest version of the AVL allocator, adapted from
 * BlueStore's AvlAllocator.
*/
class AvlAllocator : public ExtentAllocator {
public:
AvlAllocator(bool detailed) :
detailed(detailed) {}
std::optional<interval_set<rbm_abs_addr>> alloc_extent(
size_t size) final;
void free_extent(rbm_abs_addr addr, size_t size) final;
void mark_extent_used(rbm_abs_addr addr, size_t size) final;
void init(rbm_abs_addr addr, size_t size, size_t b_size);
struct dispose_rs {
void operator()(extent_range_t* p)
{
delete p;
}
};
~AvlAllocator() {
close();
}
void close() {
if (!detailed) {
assert(reserved_extent_tracker.size() == 0);
}
extent_size_tree.clear();
extent_tree.clear_and_dispose(dispose_rs{});
total_size = 0;
block_size = 0;
available_size = 0;
base_addr = 0;
}
uint64_t get_available_size() const final {
return available_size;
}
uint64_t get_max_alloc_size() const final {
return max_alloc_size;
}
bool is_free_extent(rbm_abs_addr start, size_t size);
void complete_allocation(rbm_abs_addr start, size_t size) final {
if (detailed) {
assert(reserved_extent_tracker.contains(start, size));
reserved_extent_tracker.erase(start, size);
}
}
bool is_reserved_extent(rbm_abs_addr start, size_t size) {
if (detailed) {
return reserved_extent_tracker.contains(start, size);
}
return false;
}
rbm_extent_state_t get_extent_state(rbm_abs_addr addr, size_t size) final {
if (is_reserved_extent(addr, size)) {
return rbm_extent_state_t::RESERVED;
} else if (is_free_extent(addr, size)) {
return rbm_extent_state_t::FREE;
}
return rbm_extent_state_t::ALLOCATED;
}
private:
void _add_to_tree(rbm_abs_addr start, size_t size);
void _extent_size_tree_rm(extent_range_t& r) {
ceph_assert(available_size >= r.length());
available_size -= r.length();
extent_size_tree.erase(r);
}
void _extent_size_tree_try_insert(extent_range_t& r) {
extent_size_tree.insert(r);
available_size += r.length();
}
void _remove_from_tree(rbm_abs_addr start, rbm_abs_addr size);
rbm_abs_addr find_block(size_t size);
using extent_tree_t =
boost::intrusive::avl_set<
extent_range_t,
boost::intrusive::compare<extent_range_t::before_t>,
boost::intrusive::member_hook<
extent_range_t,
boost::intrusive::avl_set_member_hook<>,
&extent_range_t::offset_hook>>;
extent_tree_t extent_tree;
using extent_size_tree_t =
boost::intrusive::avl_set<
extent_range_t,
boost::intrusive::compare<extent_range_t::shorter_t>,
boost::intrusive::member_hook<
extent_range_t,
boost::intrusive::avl_set_member_hook<>,
&extent_range_t::size_hook>>;
extent_size_tree_t extent_size_tree;
uint64_t block_size = 0;
uint64_t available_size = 0;
uint64_t total_size = 0;
uint64_t base_addr = 0;
uint64_t max_alloc_size = 4 << 20;
bool detailed;
interval_set<rbm_abs_addr> reserved_extent_tracker;
};
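/*
 * Usage sketch (illustrative only; addresses and sizes are hypothetical):
 *
 *   AvlAllocator alloc(false);                 // no reserved-extent tracking
 *   alloc.init(base_addr, total_size, 4096);
 *   if (auto extents = alloc.alloc_extent(64 << 10); extents) {
 *     for (auto [start, len] : *extents) {
 *       // ... issue the writes backed by [start, start + len) ...
 *       alloc.complete_allocation(start, len); // RESERVED -> ALLOCATED
 *       // ... and once the space is no longer needed:
 *       // alloc.free_extent(start, len);      // ALLOCATED -> FREE
 *     }
 *   }
 */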
}
| 4,399 | 24.142857 | 77 | h |
null | ceph-main/src/crimson/os/seastore/random_block_manager/block_rb_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iosfwd>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "include/ceph_assert.h"
#include "crimson/os/seastore/seastore_types.h"
#include "include/buffer_fwd.h"
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/transaction.h"
#include "rbm_device.h"
#include "crimson/os/seastore/random_block_manager.h"
#include "crimson/common/layout.h"
#include "include/buffer.h"
#include "include/uuid.h"
#include "avlallocator.h"
namespace crimson::os::seastore {
using RBMDevice = random_block_device::RBMDevice;
using RBMDeviceRef = std::unique_ptr<RBMDevice>;
device_config_t get_rbm_ephemeral_device_config(
std::size_t index, std::size_t num_devices);
class BlockRBManager final : public RandomBlockManager {
public:
/*
* Ondisk layout (TODO)
*
* ---------------------------------------------------------------------------
* | rbm_metadata_header_t | metadatas | ... | data blocks |
* ---------------------------------------------------------------------------
*/
read_ertr::future<> read(paddr_t addr, bufferptr &buffer) final;
write_ertr::future<> write(paddr_t addr, bufferptr &buf) final;
open_ertr::future<> open() final;
close_ertr::future<> close() final;
/*
* alloc_extent
*
   * The role of this function is to find the free blocks that the transaction
   * requires. To do so, alloc_extent() consults both the in-memory allocator
   * and the free-bitmap blocks.
*
* TODO: multiple allocation
*
*/
paddr_t alloc_extent(size_t size) final; // allocator, return blocks
void complete_allocation(paddr_t addr, size_t size) final;
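  /*
   * Allocation flow sketch (illustrative only):
   *
   *   paddr_t addr = rbm.alloc_extent(len);    // blocks become RESERVED
   *   // ... write out the extent and persist the allocation ...
   *   rbm.complete_allocation(addr, len);      // RESERVED -> ALLOCATED
   *
   * Space that is already allocated (e.g. discovered during mount/replay)
   * can be re-registered with mark_space_used() and later released with
   * mark_space_free().
   */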
size_t get_start_rbm_addr() const {
return device->get_shard_journal_start() + device->get_journal_size();
}
size_t get_size() const final {
return device->get_shard_end() - get_start_rbm_addr();
};
extent_len_t get_block_size() const final { return device->get_block_size(); }
BlockRBManager(RBMDevice * device, std::string path, bool detailed)
: device(device), path(path) {
allocator.reset(new AvlAllocator(detailed));
}
write_ertr::future<> write(rbm_abs_addr addr, bufferlist &bl);
device_id_t get_device_id() const final {
assert(device);
return device->get_device_id();
}
uint64_t get_free_blocks() const final {
// TODO: return correct free blocks after block allocator is introduced
assert(device);
return get_size() / get_block_size();
}
const seastore_meta_t &get_meta() const final {
return device->get_meta();
}
RBMDevice* get_device() {
return device;
}
void mark_space_used(paddr_t paddr, size_t len) final {
assert(allocator);
rbm_abs_addr addr = convert_paddr_to_abs_addr(paddr);
assert(addr >= get_start_rbm_addr() &&
addr + len <= device->get_shard_end());
allocator->mark_extent_used(addr, len);
}
void mark_space_free(paddr_t paddr, size_t len) final {
assert(allocator);
rbm_abs_addr addr = convert_paddr_to_abs_addr(paddr);
assert(addr >= get_start_rbm_addr() &&
addr + len <= device->get_shard_end());
allocator->free_extent(addr, len);
}
paddr_t get_start() final {
return convert_abs_addr_to_paddr(
get_start_rbm_addr(),
device->get_device_id());
}
rbm_extent_state_t get_extent_state(paddr_t paddr, size_t size) final {
assert(allocator);
rbm_abs_addr addr = convert_paddr_to_abs_addr(paddr);
assert(addr >= get_start_rbm_addr() &&
addr + size <= device->get_shard_end());
return allocator->get_extent_state(addr, size);
}
size_t get_journal_size() const final {
return device->get_journal_size();
}
private:
/*
* this contains the number of bitmap blocks, free blocks and
* rbm specific information
*/
ExtentAllocatorRef allocator;
RBMDevice * device;
std::string path;
int stream_id; // for multi-stream
};
using BlockRBManagerRef = std::unique_ptr<BlockRBManager>;
}
| 4,130 | 27.888112 | 83 | h |
null | ceph-main/src/crimson/os/seastore/random_block_manager/extent_allocator.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/random_block_manager.h"
#include "include/interval_set.h"
namespace crimson::os::seastore {
class ExtentAllocator {
public:
/**
* alloc_extent
*
   * Allocate a contiguous region of the given size.
   * Note that the initial state of the extent is RESERVED after alloc_extent().
* see rbm_extent_state_t in random_block_manager.h
*
* @param size
* @return nullopt or the address range (rbm_abs_addr, len)
*/
virtual std::optional<interval_set<rbm_abs_addr>> alloc_extent(
size_t size) = 0;
/**
* free_extent
*
* free given region
*
* @param rbm_abs_addr
* @param size
*/
virtual void free_extent(rbm_abs_addr addr, size_t size) = 0;
/**
* mark_extent_used
*
* This marks given region as used without alloc_extent.
*
* @param rbm_abs_addr
* @param size
*/
virtual void mark_extent_used(rbm_abs_addr addr, size_t size) = 0;
/**
* init
*
* Initialize the address space the ExtentAllocator will manage
*
* @param start address (rbm_abs_addr)
* @param total size
* @param block size
*/
virtual void init(rbm_abs_addr addr, size_t size, size_t b_size) = 0;
virtual uint64_t get_available_size() const = 0;
virtual uint64_t get_max_alloc_size() const = 0;
virtual void close() = 0;
/**
* complete_allocation
*
* This changes this extent state from RESERVED to ALLOCATED
*
* @param start address
* @param size
*/
virtual void complete_allocation(rbm_abs_addr start, size_t size) = 0;
virtual rbm_extent_state_t get_extent_state(rbm_abs_addr addr, size_t size) = 0;
virtual ~ExtentAllocator() {}
};
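/*
 * Extent state lifecycle implied by the comments above (see
 * rbm_extent_state_t in random_block_manager.h):
 *
 *   FREE --alloc_extent()--> RESERVED --complete_allocation()--> ALLOCATED
 *   ALLOCATED --free_extent()--> FREE
 *
 * mark_extent_used() takes an extent straight to ALLOCATED without the
 * RESERVED stage.
 */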
using ExtentAllocatorRef = std::unique_ptr<ExtentAllocator>;
}
| 2,021 | 25.605263 | 82 | h |
null | ceph-main/src/crimson/os/seastore/random_block_manager/nvme_block_device.h | //-*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <memory>
#include <vector>
#include <seastar/core/file.hh>
#include <linux/nvme_ioctl.h>
#include "crimson/osd/exceptions.h"
#include "crimson/common/layout.h"
#include "rbm_device.h"
namespace ceph {
namespace buffer {
class bufferptr;
}
}
namespace crimson::os::seastore::random_block_device::nvme {
/*
* NVMe protocol structures (nvme_XX, identify_XX)
*
 * All structures related to the NVMe protocol follow NVMe protocol v1.4
 * (the latest at the time of writing). NVMe is a protocol for fast
 * interfacing between the host and an SSD. We selectively adopted a subset
 * of NVMe features to ease implementation. NVMeBlockDevice also provides
 * generic command-submission APIs for IO and Admin commands; use
 * pass_through_io() and pass_admin() for that.
*
 * For more information about the NVMe protocol, refer to https://nvmexpress.org/
*/
struct nvme_identify_command_t {
uint32_t common_dw[10];
uint32_t cns : 8;
uint32_t reserved : 8;
uint32_t cnt_id : 16;
static const uint8_t CNS_NAMESPACE = 0x00;
static const uint8_t CNS_CONTROLLER = 0x01;
};
struct nvme_admin_command_t {
union {
nvme_passthru_cmd common;
nvme_identify_command_t identify;
};
static const uint8_t OPCODE_IDENTIFY = 0x06;
};
// Optional Admin Command Support (OACS)
// Indicates whether optional commands are supported by the SSD
struct oacs_t {
uint16_t unused : 5;
uint16_t support_directives : 1; // Support multi-stream
uint16_t unused2 : 10;
};
struct nvme_identify_controller_data_t {
union {
struct {
uint8_t unused[256]; // [255:0]
oacs_t oacs; // [257:256]
uint8_t unused2[270]; // [527:258]
uint16_t awupf; // [529:528]
};
uint8_t raw[4096];
};
};
// End-to-end Data Protection Capabilities (DPC)
// Indicates type of E2E data protection supported by SSD
struct dpc_t {
uint8_t support_type1 : 1;
uint8_t support_type2 : 1;
uint8_t support_type3 : 1;
uint8_t support_first_meta : 1;
uint8_t support_last_meta : 1;
uint8_t reserved : 3;
};
// End-to-end Data Protection Type Settings (DPS)
// Indicates enabled type of E2E data protection
struct dps_t {
uint8_t protection_type : 3;
uint8_t protection_info : 1;
uint8_t reserved : 4;
};
// Namespace Features (NSFEAT)
// Indicates features of namespace
struct nsfeat_t {
uint8_t thinp : 1;
uint8_t nsabp : 1;
uint8_t dae : 1;
uint8_t uid_reuse : 1;
uint8_t opterf : 1; // Support NPWG, NPWA
uint8_t reserved : 3;
};
// LBA Format (LBAF)
// Indicates LBA format (metadata size, data size, performance)
struct lbaf_t {
uint32_t ms : 16;
uint32_t lbads : 8;
uint32_t rp : 2;
uint32_t reserved : 6;
};
struct nvme_identify_namespace_data_t {
union {
struct {
uint8_t unused[24]; // [23:0]
nsfeat_t nsfeat; // [24]
uint8_t unused2[3]; // [27:25]
dpc_t dpc; // [28]
dps_t dps; // [29]
uint8_t unused3[34]; // [63:30]
uint16_t npwg; // [65:64]
uint16_t npwa; // [67:66]
uint8_t unused4[60]; // [127:68]
lbaf_t lbaf0; // [131:128]
};
uint8_t raw[4096];
};
};
struct nvme_rw_command_t {
uint32_t common_dw[10];
uint64_t s_lba;
uint32_t nlb : 16; // 0's based value
uint32_t reserved : 4;
uint32_t d_type : 4;
uint32_t reserved2 : 2;
uint32_t prinfo_prchk : 3;
uint32_t prinfo_pract : 1;
uint32_t fua : 1;
uint32_t lr : 1;
uint32_t reserved3 : 16;
uint32_t dspec : 16;
static const uint32_t DTYPE_STREAM = 1;
};
struct nvme_io_command_t {
union {
nvme_passthru_cmd common;
nvme_rw_command_t rw;
};
static const uint8_t OPCODE_WRITE = 0x01;
  static const uint8_t OPCODE_READ = 0x02; // NVM command set: Write = 0x01, Read = 0x02
};
/*
* Implementation of NVMeBlockDevice with POSIX APIs
*
 * NVMeBlockDevice provides NVMe SSD interfaces through POSIX APIs, which are
 * generally available in most operating environments.
*/
class NVMeBlockDevice : public RBMDevice {
public:
/*
   * Size-related parameters of the NVMe device
   *
   * size : total size of the device in bytes.
   *
   * block_size : IO unit size in bytes. Callers should align every IO command
   * to the block size.
   *
   * preferred_write_granularity (PWG), preferred_write_alignment (PWA) : IO
   * unit size for writes, in bytes. Callers should issue writes whose size is
   * a multiple of PWG and whose starting address is aligned to PWA. Available
   * only if the NVMe device supports NVMe protocol 1.4 or later.
   *
   * atomic_write_unit : the maximum write size whose atomicity is guaranteed
   * by the SSD even on power failure. Writes equal to or smaller than
   * atomic_write_unit do not require fsync().
*/
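  /*
   * Illustrative sketch of honouring these hints (p2align()/p2roundup() as
   * used elsewhere in this codebase; `off` and `len` are hypothetical caller
   * values):
   *
   *   uint64_t a_off = p2align(off, dev.get_preffered_write_alignment());
   *   uint64_t a_len = p2roundup(len, dev.get_preffered_write_granularity());
   *   // then issue dev.write(a_off, ...) with an a_len-sized, aligned buffer
   */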
NVMeBlockDevice(std::string device_path) : device_path(device_path) {}
~NVMeBlockDevice() = default;
open_ertr::future<> open(
const std::string &in_path,
seastar::open_flags mode) override;
write_ertr::future<> write(
uint64_t offset,
bufferptr &&bptr,
uint16_t stream = 0) override;
using RBMDevice::read;
read_ertr::future<> read(
uint64_t offset,
bufferptr &bptr) final;
close_ertr::future<> close() override;
discard_ertr::future<> discard(
uint64_t offset,
uint64_t len) override;
mount_ret mount() final;
mkfs_ret mkfs(device_config_t config) final;
write_ertr::future<> writev(
uint64_t offset,
ceph::bufferlist bl,
uint16_t stream = 0) final;
stat_device_ret stat_device() final {
return seastar::file_stat(device_path, seastar::follow_symlink::yes
).handle_exception([](auto e) -> stat_device_ret {
return crimson::ct_error::input_output_error::make();
}).then([this](auto stat) {
return seastar::open_file_dma(
device_path,
seastar::open_flags::rw | seastar::open_flags::dsync
).then([this, stat](auto file) mutable {
return file.size().then([this, stat, file](auto size) mutable {
stat.size = size;
return identify_namespace(file
).safe_then([stat] (auto id_namespace_data) mutable {
// LBA format provides LBA size which is power of 2. LBA is the
// minimum size of read and write.
stat.block_size = (1 << id_namespace_data.lbaf0.lbads);
if (stat.block_size < RBM_SUPERBLOCK_SIZE) {
stat.block_size = RBM_SUPERBLOCK_SIZE;
}
return stat_device_ret(
read_ertr::ready_future_marker{},
stat
);
}).handle_error(crimson::ct_error::input_output_error::handle(
[stat]{
return stat_device_ret(
read_ertr::ready_future_marker{},
stat
);
}), crimson::ct_error::pass_further_all{});
}).safe_then([file](auto st) mutable {
return file.close(
).then([st] {
return stat_device_ret(
read_ertr::ready_future_marker{},
st
);
});
});
});
});
}
std::string get_device_path() const final {
return device_path;
}
seastar::future<> start() final {
return shard_devices.start(device_path);
}
seastar::future<> stop() final {
return shard_devices.stop();
}
Device& get_sharded_device() final {
return shard_devices.local();
}
uint64_t get_preffered_write_granularity() const { return write_granularity; }
uint64_t get_preffered_write_alignment() const { return write_alignment; }
uint64_t get_atomic_write_unit() const { return atomic_write_unit; }
/*
* End-to-End Data Protection
*
   * The NVMe device tracks data integrity similarly to a checksum. Clients can
   * offload checksumming to the NVMe device to reduce CPU utilization. If data
* protection is enabled, checksum is calculated on every write and used to
* verify data on every read.
*/
bool is_data_protection_enabled() const { return data_protection_enabled; }
/*
* Data Health
*
   * Returns a list of LBAs whose data is close to becoming corrupted and will
   * likely be lost soon. Callers can overwrite, unmap or refresh the data to
   * protect it.
*/
virtual nvme_command_ertr::future<std::list<uint64_t>> get_data_health() {
std::list<uint64_t> fragile_lbas;
return nvme_command_ertr::future<std::list<uint64_t>>(
nvme_command_ertr::ready_future_marker{},
fragile_lbas
);
}
/*
* Recovery Level
*
   * Regulates the magnitude of SSD-internal data recovery. Callers get better
   * read latency with a lower magnitude.
*/
virtual nvme_command_ertr::future<> set_data_recovery_level(
uint32_t level) { return nvme_command_ertr::now(); }
/*
   * For passing NVMe IO or Admin commands through to the SSD.
   * Callers can construct and execute their own NVMe commands.
*/
nvme_command_ertr::future<int> pass_admin(
nvme_admin_command_t& admin_cmd, seastar::file f);
nvme_command_ertr::future<int> pass_through_io(
nvme_io_command_t& io_cmd);
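  /*
   * Illustrative sketch of driving pass_admin() with an Identify Controller
   * command; the field setup below is an assumption based on
   * linux/nvme_ioctl.h and may differ from the actual .cc implementation:
   *
   *   nvme_identify_controller_data_t id_data;
   *   nvme_admin_command_t cmd{};
   *   cmd.common.opcode = nvme_admin_command_t::OPCODE_IDENTIFY;
   *   cmd.common.addr = reinterpret_cast<uint64_t>(&id_data);
   *   cmd.common.data_len = sizeof(id_data);
   *   cmd.identify.cns = nvme_identify_command_t::CNS_CONTROLLER;
   *   return pass_admin(cmd, device_file);
   */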
bool support_multistream = false;
uint8_t data_protection_type = 0;
/*
* Predictable Latency
*
   * The NVMe device can guarantee IO latency within a pre-defined time window.
   * This functionality will be analyzed soon.
*/
private:
// identify_controller/namespace are used to get SSD internal information such
// as supported features, NPWG and NPWA
nvme_command_ertr::future<nvme_identify_controller_data_t>
identify_controller(seastar::file f);
nvme_command_ertr::future<nvme_identify_namespace_data_t>
identify_namespace(seastar::file f);
nvme_command_ertr::future<int> get_nsid(seastar::file f);
open_ertr::future<> open_for_io(
const std::string& in_path,
seastar::open_flags mode);
seastar::file device;
std::vector<seastar::file> io_device;
uint32_t stream_index_to_open = WRITE_LIFE_NOT_SET;
  uint32_t stream_id_count = 1; // streams are disabled by default.
uint32_t awupf = 0;
uint64_t write_granularity = 4096;
uint64_t write_alignment = 4096;
uint32_t atomic_write_unit = 4096;
bool data_protection_enabled = false;
std::string device_path;
seastar::sharded<NVMeBlockDevice> shard_devices;
};
}
| 10,069 | 26.894737 | 80 | h |
null | ceph-main/src/crimson/os/seastore/random_block_manager/rbm_device.h | //-*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/random_block_manager.h"
#include "crimson/os/seastore/device.h"
namespace ceph {
namespace buffer {
class bufferptr;
}
}
namespace crimson::os::seastore::random_block_device {
// from blk/BlockDevice.h
#if defined(__linux__)
#if !defined(F_SET_FILE_RW_HINT)
#define F_LINUX_SPECIFIC_BASE 1024
#define F_SET_FILE_RW_HINT (F_LINUX_SPECIFIC_BASE + 14)
#endif
// These values match Linux definition
// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/uapi/linux/fcntl.h#n56
#define WRITE_LIFE_NOT_SET 0 // No hint information set
#define WRITE_LIFE_NONE 1 // No hints about write life time
#define WRITE_LIFE_SHORT 2 // Data written has a short life time
#define WRITE_LIFE_MEDIUM 3 // Data written has a medium life time
#define WRITE_LIFE_LONG 4 // Data written has a long life time
#define WRITE_LIFE_EXTREME 5 // Data written has an extremely long life time
#define WRITE_LIFE_MAX 6
#else
// On systems that don't have WRITE_LIFE_*, only use one FD
// And all files are created equal
#define WRITE_LIFE_NOT_SET 0 // No hint information set
#define WRITE_LIFE_NONE 0 // No hints about write life time
#define WRITE_LIFE_SHORT 0 // Data written has a short life time
#define WRITE_LIFE_MEDIUM 0 // Data written has a medium life time
#define WRITE_LIFE_LONG 0 // Data written has a long life time
#define WRITE_LIFE_EXTREME 0 // Data written has an extremely long life time
#define WRITE_LIFE_MAX 1
#endif
using read_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent,
crimson::ct_error::erange>;
using write_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::ebadf,
crimson::ct_error::enospc>;
using open_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent>;
using nvme_command_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
using discard_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
constexpr uint32_t RBM_SUPERBLOCK_SIZE = 4096;
enum {
// TODO: This allows the device to manage crc on a block by itself
RBM_NVME_END_TO_END_PROTECTION = 1,
RBM_BITMAP_BLOCK_CRC = 2,
};
class RBMDevice : public Device {
public:
using Device::read;
read_ertr::future<> read (
paddr_t addr,
size_t len,
ceph::bufferptr &out) final {
uint64_t rbm_addr = convert_paddr_to_abs_addr(addr);
return read(rbm_addr, out);
}
protected:
rbm_metadata_header_t super;
rbm_shard_info_t shard_info;
public:
RBMDevice() {}
virtual ~RBMDevice() = default;
template <typename T>
static std::unique_ptr<T> create() {
return std::make_unique<T>();
}
device_id_t get_device_id() const {
return super.config.spec.id;
}
magic_t get_magic() const final {
return super.config.spec.magic;
}
device_type_t get_device_type() const final {
return device_type_t::RANDOM_BLOCK_SSD;
}
backend_type_t get_backend_type() const final {
return backend_type_t::RANDOM_BLOCK;
}
const seastore_meta_t &get_meta() const final {
return super.config.meta;
}
secondary_device_set_t& get_secondary_devices() final {
return super.config.secondary_devices;
}
std::size_t get_available_size() const { return super.size; }
extent_len_t get_block_size() const { return super.block_size; }
virtual read_ertr::future<> read(
uint64_t offset,
bufferptr &bptr) = 0;
/*
* Multi-stream write
*
   * Gives the device a hint classifying data with similar lifetimes. Data
   * with the same stream value will be managed together inside the SSD for
   * better write performance.
*/
virtual write_ertr::future<> write(
uint64_t offset,
bufferptr &&bptr,
uint16_t stream = 0) = 0;
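  /*
   * e.g. (illustrative only) a caller separating short-lived journal writes
   * from long-lived data might pass different stream hints:
   *
   *   dev.write(journal_off, std::move(journal_bptr), WRITE_LIFE_SHORT);
   *   dev.write(data_off, std::move(data_bptr), WRITE_LIFE_LONG);
   */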
virtual discard_ertr::future<> discard(
uint64_t offset,
uint64_t len) { return seastar::now(); }
virtual open_ertr::future<> open(
const std::string& path,
seastar::open_flags mode) = 0;
virtual write_ertr::future<> writev(
uint64_t offset,
ceph::bufferlist bl,
uint16_t stream = 0) = 0;
bool is_data_protection_enabled() const { return false; }
mkfs_ret do_mkfs(device_config_t);
// shard 0 mkfs
mkfs_ret do_primary_mkfs(device_config_t, int shard_num, size_t journal_size);
mount_ret do_mount();
mount_ret do_shard_mount();
write_ertr::future<> write_rbm_header();
read_ertr::future<rbm_metadata_header_t> read_rbm_header(rbm_abs_addr addr);
using stat_device_ret =
read_ertr::future<seastar::stat_data>;
virtual stat_device_ret stat_device() = 0;
virtual std::string get_device_path() const = 0;
uint64_t get_journal_size() const {
return super.journal_size;
}
static rbm_abs_addr get_shard_reserved_size() {
return RBM_SUPERBLOCK_SIZE;
}
rbm_abs_addr get_shard_journal_start() {
return shard_info.start_offset + get_shard_reserved_size();
}
uint64_t get_shard_start() const {
return shard_info.start_offset;
}
uint64_t get_shard_end() const {
return shard_info.start_offset + shard_info.size;
}
};
using RBMDeviceRef = std::unique_ptr<RBMDevice>;
constexpr uint64_t DEFAULT_TEST_CBJOURNAL_SIZE = 1 << 26;
class EphemeralRBMDevice : public RBMDevice {
public:
uint64_t size = 0;
uint64_t block_size = 0;
constexpr static uint32_t TEST_BLOCK_SIZE = 4096;
EphemeralRBMDevice(size_t size, uint64_t block_size) :
size(size), block_size(block_size), buf(nullptr) {
}
~EphemeralRBMDevice() {
if (buf) {
::munmap(buf, size);
buf = nullptr;
}
}
std::size_t get_available_size() const final { return size; }
extent_len_t get_block_size() const final { return block_size; }
mount_ret mount() final;
mkfs_ret mkfs(device_config_t config) final;
open_ertr::future<> open(
const std::string &in_path,
seastar::open_flags mode) override;
write_ertr::future<> write(
uint64_t offset,
bufferptr &&bptr,
uint16_t stream = 0) override;
using RBMDevice::read;
read_ertr::future<> read(
uint64_t offset,
bufferptr &bptr) override;
close_ertr::future<> close() override;
write_ertr::future<> writev(
uint64_t offset,
ceph::bufferlist bl,
uint16_t stream = 0) final;
stat_device_ret stat_device() final {
seastar::stat_data stat;
stat.block_size = block_size;
stat.size = size;
return stat_device_ret(
read_ertr::ready_future_marker{},
stat
);
}
std::string get_device_path() const final {
return "";
}
char *buf;
};
using EphemeralRBMDeviceRef = std::unique_ptr<EphemeralRBMDevice>;
EphemeralRBMDeviceRef create_test_ephemeral(
uint64_t journal_size = DEFAULT_TEST_CBJOURNAL_SIZE,
uint64_t data_size = DEFAULT_TEST_CBJOURNAL_SIZE);
}
| 7,167 | 26.358779 | 105 | h |
null | ceph-main/src/crimson/os/seastore/segment_manager/block.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/file.hh>
#include <seastar/core/future.hh>
#include <seastar/core/reactor.hh>
#include "crimson/common/layout.h"
#include "crimson/os/seastore/segment_manager.h"
namespace crimson::os::seastore::segment_manager::block {
using write_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
using read_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
/**
* SegmentStateTracker
*
* Tracks lifecycle state of each segment using space at the beginning
* of the drive.
*/
class SegmentStateTracker {
using segment_state_t = Segment::segment_state_t;
bufferptr bptr;
using L = absl::container_internal::Layout<uint8_t>;
const L layout;
public:
static size_t get_raw_size(size_t segments, size_t block_size) {
return p2roundup(segments, block_size);
}
SegmentStateTracker(size_t segments, size_t block_size)
: bptr(ceph::buffer::create_page_aligned(
get_raw_size(segments, block_size))),
layout(bptr.length())
{
::memset(
bptr.c_str(),
static_cast<char>(segment_state_t::EMPTY),
bptr.length());
}
size_t get_size() const {
return bptr.length();
}
size_t get_capacity() const {
return bptr.length();
}
segment_state_t get(device_segment_id_t offset) const {
assert(offset < get_capacity());
return static_cast<segment_state_t>(
layout.template Pointer<0>(
bptr.c_str())[offset]);
}
void set(device_segment_id_t offset, segment_state_t state) {
assert(offset < get_capacity());
layout.template Pointer<0>(bptr.c_str())[offset] =
static_cast<uint8_t>(state);
}
write_ertr::future<> write_out(
device_id_t device_id,
seastar::file &device,
uint64_t offset);
read_ertr::future<> read_in(
device_id_t device_id,
seastar::file &device,
uint64_t offset);
};
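/*
 * Usage sketch (illustrative only; `tracker_offset` is wherever the tracker
 * area lives on the device):
 *
 *   SegmentStateTracker tracker(num_segments, block_size);
 *   tracker.set(seg_id, new_state);  // e.g. when a segment is opened/closed
 *   return tracker.write_out(device_id, device, tracker_offset);
 */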
class BlockSegmentManager;
class BlockSegment final : public Segment {
friend class BlockSegmentManager;
BlockSegmentManager &manager;
const segment_id_t id;
segment_off_t write_pointer = 0;
public:
BlockSegment(BlockSegmentManager &manager, segment_id_t id);
segment_id_t get_segment_id() const final { return id; }
segment_off_t get_write_capacity() const final;
segment_off_t get_write_ptr() const final { return write_pointer; }
close_ertr::future<> close() final;
write_ertr::future<> write(segment_off_t offset, ceph::bufferlist bl) final;
write_ertr::future<> advance_wp(segment_off_t offset) final;
~BlockSegment() {}
};
/**
* BlockSegmentManager
*
* Implements SegmentManager on a conventional block device.
* SegmentStateTracker uses space at the start of the device to store
 * state analogous to that of the segments of a ZNS device.
*/
class BlockSegmentManager final : public SegmentManager {
// interfaces used by Device
public:
seastar::future<> start() {
return shard_devices.start(device_path, superblock.config.spec.dtype);
}
seastar::future<> stop() {
return shard_devices.stop();
}
Device& get_sharded_device() final {
return shard_devices.local();
}
mount_ret mount() final;
mkfs_ret mkfs(device_config_t) final;
// interfaces used by each shard device
public:
close_ertr::future<> close();
BlockSegmentManager(
const std::string &path,
device_type_t dtype)
: device_path(path) {
ceph_assert(get_device_type() == device_type_t::NONE);
superblock.config.spec.dtype = dtype;
}
~BlockSegmentManager();
open_ertr::future<SegmentRef> open(segment_id_t id) final;
release_ertr::future<> release(segment_id_t id) final;
read_ertr::future<> read(
paddr_t addr,
size_t len,
ceph::bufferptr &out) final;
device_type_t get_device_type() const final {
return superblock.config.spec.dtype;
}
size_t get_available_size() const final {
return shard_info.size;
}
extent_len_t get_block_size() const {
return superblock.block_size;
}
segment_off_t get_segment_size() const {
return superblock.segment_size;
}
device_id_t get_device_id() const final {
assert(device_id <= DEVICE_ID_MAX_VALID);
return device_id;
}
secondary_device_set_t& get_secondary_devices() final {
return superblock.config.secondary_devices;
}
// public so tests can bypass segment interface when simpler
Segment::write_ertr::future<> segment_write(
paddr_t addr,
ceph::bufferlist bl,
bool ignore_check=false);
magic_t get_magic() const final {
return superblock.config.spec.magic;
}
private:
friend class BlockSegment;
using segment_state_t = Segment::segment_state_t;
struct effort_t {
uint64_t num = 0;
uint64_t bytes = 0;
void increment(uint64_t read_bytes) {
++num;
bytes += read_bytes;
}
};
struct {
effort_t data_read;
effort_t data_write;
effort_t metadata_write;
uint64_t opened_segments;
uint64_t closed_segments;
uint64_t closed_segments_unused_bytes;
uint64_t released_segments;
void reset() {
data_read = {};
data_write = {};
metadata_write = {};
opened_segments = 0;
closed_segments = 0;
closed_segments_unused_bytes = 0;
released_segments = 0;
}
} stats;
void register_metrics();
seastar::metrics::metric_group metrics;
std::string device_path;
std::unique_ptr<SegmentStateTracker> tracker;
block_shard_info_t shard_info;
block_sm_superblock_t superblock;
seastar::file device;
void set_device_id(device_id_t id) {
assert(id <= DEVICE_ID_MAX_VALID);
assert(device_id == DEVICE_ID_NULL ||
device_id == id);
device_id = id;
}
device_id_t device_id = DEVICE_ID_NULL;
size_t get_offset(paddr_t addr) {
auto& seg_addr = addr.as_seg_paddr();
return shard_info.first_segment_offset +
(seg_addr.get_segment_id().device_segment_id() * superblock.segment_size) +
seg_addr.get_segment_off();
}
const seastore_meta_t &get_meta() const {
return superblock.config.meta;
}
std::vector<segment_state_t> segment_state;
char *buffer = nullptr;
Segment::close_ertr::future<> segment_close(
segment_id_t id, segment_off_t write_pointer);
private:
// shard 0 mkfs
mkfs_ret primary_mkfs(device_config_t);
// all shards mkfs
mkfs_ret shard_mkfs();
// all shards mount
mount_ret shard_mount();
seastar::sharded<BlockSegmentManager> shard_devices;
};
}
| 6,606 | 24.121673 | 81 | h |
null | ceph-main/src/crimson/os/seastore/segment_manager/ephemeral.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "crimson/os/seastore/segment_manager.h"
#include "crimson/os/seastore/segment_manager/ephemeral.h"
namespace crimson::os::seastore::segment_manager {
class EphemeralSegmentManager;
using EphemeralSegmentManagerRef = std::unique_ptr<EphemeralSegmentManager>;
struct ephemeral_config_t {
size_t size = 0;
size_t block_size = 0;
size_t segment_size = 0;
void validate() const {
ceph_assert_always(size > 0);
ceph_assert_always(size <= DEVICE_OFF_MAX);
ceph_assert_always(segment_size > 0);
ceph_assert_always(segment_size <= SEGMENT_OFF_MAX);
ceph_assert_always(size / segment_size > 0);
ceph_assert_always(size / segment_size <= DEVICE_SEGMENT_ID_MAX);
}
};
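// 1 GiB of total space, 4 KiB blocks and 8 MiB segments.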
constexpr ephemeral_config_t DEFAULT_TEST_EPHEMERAL = {
1 << 30,
4 << 10,
8 << 20
};
std::ostream &operator<<(std::ostream &, const ephemeral_config_t &);
EphemeralSegmentManagerRef create_test_ephemeral();
device_config_t get_ephemeral_device_config(
std::size_t index,
std::size_t num_main_devices,
std::size_t num_cold_devices);
class EphemeralSegment final : public Segment {
friend class EphemeralSegmentManager;
EphemeralSegmentManager &manager;
const segment_id_t id;
segment_off_t write_pointer = 0;
public:
EphemeralSegment(EphemeralSegmentManager &manager, segment_id_t id);
segment_id_t get_segment_id() const final { return id; }
segment_off_t get_write_capacity() const final;
segment_off_t get_write_ptr() const final { return write_pointer; }
close_ertr::future<> close() final;
write_ertr::future<> write(segment_off_t offset, ceph::bufferlist bl) final;
write_ertr::future<> advance_wp(segment_off_t offset) final;
~EphemeralSegment() {}
};
class EphemeralSegmentManager final : public SegmentManager {
friend class EphemeralSegment;
using segment_state_t = Segment::segment_state_t;
const ephemeral_config_t config;
std::optional<device_config_t> device_config;
device_type_t get_device_type() const final {
assert(device_config);
return device_config->spec.dtype;
}
size_t get_offset(paddr_t addr) {
auto& seg_addr = addr.as_seg_paddr();
return (seg_addr.get_segment_id().device_segment_id() * config.segment_size) +
seg_addr.get_segment_off();
}
std::vector<segment_state_t> segment_state;
char *buffer = nullptr;
Segment::close_ertr::future<> segment_close(segment_id_t id);
public:
EphemeralSegmentManager(
ephemeral_config_t config)
: config(config) {
config.validate();
}
~EphemeralSegmentManager();
close_ertr::future<> close() final {
return close_ertr::now();
}
device_id_t get_device_id() const final {
assert(device_config);
return device_config->spec.id;
}
mount_ret mount() final {
return mount_ertr::now();
}
mkfs_ret mkfs(device_config_t) final;
open_ertr::future<SegmentRef> open(segment_id_t id) final;
release_ertr::future<> release(segment_id_t id) final;
read_ertr::future<> read(
paddr_t addr,
size_t len,
ceph::bufferptr &out) final;
size_t get_available_size() const final {
return config.size;
}
extent_len_t get_block_size() const final {
return config.block_size;
}
segment_off_t get_segment_size() const final {
return config.segment_size;
}
const seastore_meta_t &get_meta() const final {
assert(device_config);
return device_config->meta;
}
secondary_device_set_t& get_secondary_devices() final {
assert(device_config);
return device_config->secondary_devices;
}
magic_t get_magic() const final {
return device_config->spec.magic;
}
using init_ertr = crimson::errorator<
crimson::ct_error::enospc,
crimson::ct_error::invarg>;
init_ertr::future<> init();
void remount();
// public so tests can bypass segment interface when simpler
Segment::write_ertr::future<> segment_write(
paddr_t addr,
ceph::bufferlist bl,
bool ignore_check=false);
};
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::segment_manager::ephemeral_config_t> : fmt::ostream_formatter {};
#endif
| 4,354 | 25.077844 | 122 | h |
null | ceph-main/src/crimson/os/seastore/segment_manager/zbd.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <linux/blkzoned.h>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/file.hh>
#include <seastar/core/future.hh>
#include <seastar/core/reactor.hh>
#include "crimson/common/layout.h"
#include "crimson/os/seastore/segment_manager.h"
#include "include/uuid.h"
namespace crimson::os::seastore::segment_manager::zbd {
struct zbd_shard_info_t {
size_t size = 0;
size_t segments = 0;
size_t first_segment_offset = 0;
DENC(zbd_shard_info_t, v, p) {
DENC_START(1, 1, p);
denc(v.size, p);
denc(v.segments, p);
denc(v.first_segment_offset, p);
DENC_FINISH(p);
}
};
struct zbd_sm_metadata_t {
unsigned int shard_num = 0;
size_t segment_size = 0;
size_t segment_capacity = 0;
size_t zones_per_segment = 0;
size_t zone_capacity = 0;
size_t block_size = 0;
size_t zone_size = 0;
std::vector<zbd_shard_info_t> shard_infos;
seastore_meta_t meta;
bool major_dev = false;
magic_t magic = 0;
device_type_t dtype = device_type_t::NONE;
device_id_t device_id = 0;
secondary_device_set_t secondary_devices;
DENC(zbd_sm_metadata_t, v, p) {
DENC_START(1, 1, p);
denc(v.shard_num, p);
denc(v.segment_size, p);
denc(v.segment_capacity, p);
denc(v.zones_per_segment, p);
denc(v.zone_capacity, p);
denc(v.block_size, p);
denc(v.zone_size, p);
denc(v.shard_infos, p);
denc(v.meta, p);
denc(v.magic, p);
denc(v.dtype, p);
denc(v.device_id, p);
if (v.major_dev) {
denc(v.secondary_devices, p);
}
DENC_FINISH(p);
}
void validate() const {
ceph_assert_always(shard_num == seastar::smp::count);
for (unsigned int i = 0; i < seastar::smp::count; i++) {
ceph_assert_always(shard_infos[i].size > 0);
ceph_assert_always(shard_infos[i].size <= DEVICE_OFF_MAX);
ceph_assert_always(shard_infos[i].segments > 0);
ceph_assert_always(shard_infos[i].segments <= DEVICE_SEGMENT_ID_MAX);
}
ceph_assert_always(segment_capacity > 0);
ceph_assert_always(segment_capacity <= SEGMENT_OFF_MAX);
}
};
using write_ertr = crimson::errorator<crimson::ct_error::input_output_error>;
using read_ertr = crimson::errorator<crimson::ct_error::input_output_error>;
enum class zone_op {
OPEN,
FINISH,
CLOSE,
RESET,
};
class ZBDSegmentManager;
class ZBDSegment final : public Segment {
public:
ZBDSegment(ZBDSegmentManager &man, segment_id_t i) : manager(man), id(i){};
segment_id_t get_segment_id() const final { return id; }
segment_off_t get_write_capacity() const final;
segment_off_t get_write_ptr() const final { return write_pointer; }
close_ertr::future<> close() final;
write_ertr::future<> write(segment_off_t offset, ceph::bufferlist bl) final;
write_ertr::future<> advance_wp(segment_off_t offset) final;
~ZBDSegment() {}
private:
friend class ZBDSegmentManager;
ZBDSegmentManager &manager;
const segment_id_t id;
segment_off_t write_pointer = 0;
write_ertr::future<> write_padding_bytes(size_t padding_bytes);
};
class ZBDSegmentManager final : public SegmentManager{
// interfaces used by Device
public:
seastar::future<> start() {
return shard_devices.start(device_path);
}
seastar::future<> stop() {
return shard_devices.stop();
}
Device& get_sharded_device() final {
return shard_devices.local();
}
mount_ret mount() final;
mkfs_ret mkfs(device_config_t meta) final;
ZBDSegmentManager(const std::string &path) : device_path(path) {}
~ZBDSegmentManager() final = default;
//interfaces used by each shard device
public:
open_ertr::future<SegmentRef> open(segment_id_t id) final;
close_ertr::future<> close() final;
release_ertr::future<> release(segment_id_t id) final;
read_ertr::future<> read(
paddr_t addr,
size_t len,
ceph::bufferptr &out) final;
device_type_t get_device_type() const final {
return device_type_t::ZBD;
}
size_t get_available_size() const final {
return shard_info.size;
};
extent_len_t get_block_size() const final {
return metadata.block_size;
};
segment_off_t get_segment_size() const final {
return metadata.segment_capacity;
};
const seastore_meta_t &get_meta() const {
return metadata.meta;
};
device_id_t get_device_id() const final;
secondary_device_set_t& get_secondary_devices() final;
magic_t get_magic() const final;
Segment::write_ertr::future<> segment_write(
paddr_t addr,
ceph::bufferlist bl,
bool ignore_check=false);
private:
friend class ZBDSegment;
std::string device_path;
zbd_shard_info_t shard_info;
zbd_sm_metadata_t metadata;
seastar::file device;
uint32_t nr_zones;
struct effort_t {
uint64_t num = 0;
uint64_t bytes = 0;
void increment(uint64_t read_bytes) {
++num;
bytes += read_bytes;
}
};
struct zbd_sm_stats {
effort_t data_read = {};
effort_t data_write = {};
effort_t metadata_write = {};
uint64_t opened_segments = 0;
uint64_t closed_segments = 0;
uint64_t closed_segments_unused_bytes = 0;
uint64_t released_segments = 0;
void reset() {
*this = zbd_sm_stats{};
}
} stats;
void register_metrics();
seastar::metrics::metric_group metrics;
Segment::close_ertr::future<> segment_close(
segment_id_t id, segment_off_t write_pointer);
uint64_t get_offset(paddr_t addr) {
auto& seg_addr = addr.as_seg_paddr();
return (shard_info.first_segment_offset +
(seg_addr.get_segment_id().device_segment_id() *
metadata.segment_size)) + seg_addr.get_segment_off();
}
private:
// shard 0 mkfs
mkfs_ret primary_mkfs(device_config_t meta);
// all shards mkfs
mkfs_ret shard_mkfs();
mount_ret shard_mount();
seastar::sharded<ZBDSegmentManager> shard_devices;
};
}
WRITE_CLASS_DENC_BOUNDED(
crimson::os::seastore::segment_manager::zbd::zbd_shard_info_t
)
WRITE_CLASS_DENC_BOUNDED(
crimson::os::seastore::segment_manager::zbd::zbd_sm_metadata_t
)
| 6,464 | 25.174089 | 80 | h |
null | ceph-main/src/crimson/osd/backfill_facades.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/osd/backfill_state.h"
#include "crimson/osd/pg.h"
#include "osd/PeeringState.h"
namespace crimson::osd {
// PeeringFacade -- main implementation of the BackfillState::PeeringFacade
// interface. We have the abstraction to decouple BackfillState from
// PeeringState, and thus cut dependencies in unit testing. The second
// implementation is BackfillFixture::PeeringFacade and sits in test_backfill.cc.
struct PeeringFacade final : BackfillState::PeeringFacade {
PeeringState& peering_state;
hobject_t earliest_backfill() const override {
return peering_state.earliest_backfill();
}
const std::set<pg_shard_t>& get_backfill_targets() const override {
return peering_state.get_backfill_targets();
}
const hobject_t& get_peer_last_backfill(pg_shard_t peer) const override {
return peering_state.get_peer_info(peer).last_backfill;
}
const eversion_t& get_last_update() const override {
return peering_state.get_info().last_update;
}
const eversion_t& get_log_tail() const override {
return peering_state.get_info().log_tail;
}
void scan_log_after(eversion_t v, scan_log_func_t f) const override {
peering_state.get_pg_log().get_log().scan_log_after(v, std::move(f));
}
bool is_backfill_target(pg_shard_t peer) const override {
return peering_state.is_backfill_target(peer);
}
void update_complete_backfill_object_stats(const hobject_t &hoid,
const pg_stat_t &stats) override {
peering_state.update_complete_backfill_object_stats(hoid, stats);
}
bool is_backfilling() const override {
return peering_state.is_backfilling();
}
PeeringFacade(PeeringState& peering_state)
: peering_state(peering_state) {
}
};
// PGFacade -- a facade (in the GoF-defined meaning) simplifying the huge
// interface of crimson's PG class. The motivation is to have an inventory
// of behaviour that must be provided by a unit test's mock.
struct PGFacade final : BackfillState::PGFacade {
PG& pg;
const eversion_t& get_projected_last_update() const override {
return pg.projected_last_update;
}
PGFacade(PG& pg) : pg(pg) {}
};
} // namespace crimson::osd
| 2,311 | 30.243243 | 79 | h |
null | ceph-main/src/crimson/osd/backfill_state.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <optional>
#include <boost/statechart/custom_reaction.hpp>
#include <boost/statechart/event.hpp>
#include <boost/statechart/event_base.hpp>
#include <boost/statechart/simple_state.hpp>
#include <boost/statechart/state.hpp>
#include <boost/statechart/state_machine.hpp>
#include <boost/statechart/transition.hpp>
#include "osd/recovery_types.h"
namespace crimson::osd {
namespace sc = boost::statechart;
struct BackfillState {
struct BackfillListener;
struct PeeringFacade;
struct PGFacade;
// events comes first
struct PrimaryScanned : sc::event<PrimaryScanned> {
BackfillInterval result;
PrimaryScanned(BackfillInterval&& result)
: result(std::move(result)) {
}
};
struct ReplicaScanned : sc::event<ReplicaScanned> {
pg_shard_t from;
BackfillInterval result;
ReplicaScanned(pg_shard_t from, BackfillInterval&& result)
: from(std::move(from)),
result(std::move(result)) {
}
};
struct ObjectPushed : sc::event<ObjectPushed> {
// TODO: implement replica management; I don't want to follow
// current convention where the backend layer is responsible
// for tracking replicas.
hobject_t object;
pg_stat_t stat;
ObjectPushed(hobject_t object)
: object(std::move(object)) {
}
};
struct Triggered : sc::event<Triggered> {
};
private:
// internal events
struct RequestPrimaryScanning : sc::event<RequestPrimaryScanning> {
};
struct RequestReplicasScanning : sc::event<RequestReplicasScanning> {
};
struct RequestWaiting : sc::event<RequestWaiting> {
};
struct RequestDone : sc::event<RequestDone> {
};
class ProgressTracker;
public:
struct Initial;
struct Enqueuing;
struct PrimaryScanning;
struct ReplicasScanning;
struct Waiting;
struct Done;
struct BackfillMachine : sc::state_machine<BackfillMachine, Initial> {
BackfillMachine(BackfillState& backfill_state,
BackfillListener& backfill_listener,
std::unique_ptr<PeeringFacade> peering_state,
std::unique_ptr<PGFacade> pg);
~BackfillMachine();
BackfillState& backfill_state;
BackfillListener& backfill_listener;
std::unique_ptr<PeeringFacade> peering_state;
std::unique_ptr<PGFacade> pg;
};
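  /*
   * Rough sketch of the event flow, derived from the reactions declared
   * below (Crashed, reachable from every state on an unexpected event, is
   * omitted):
   *
   *   Initial --Triggered--> Enqueuing
   *   Enqueuing --RequestPrimaryScanning--> PrimaryScanning
   *             --RequestReplicasScanning--> ReplicasScanning
   *             --RequestWaiting--> Waiting
   *             --RequestDone--> Done
   *   PrimaryScanning --PrimaryScanned--> Enqueuing
   *   ReplicasScanning --ReplicaScanned (all replicas)--> Enqueuing
   *   Waiting --ObjectPushed--> back towards Enqueuing once pushes complete
   */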
private:
template <class S>
struct StateHelper {
StateHelper();
~StateHelper();
BackfillState& backfill_state() {
return static_cast<S*>(this) \
->template context<BackfillMachine>().backfill_state;
}
BackfillListener& backfill_listener() {
return static_cast<S*>(this) \
->template context<BackfillMachine>().backfill_listener;
}
PeeringFacade& peering_state() {
return *static_cast<S*>(this) \
->template context<BackfillMachine>().peering_state;
}
PGFacade& pg() {
return *static_cast<S*>(this)->template context<BackfillMachine>().pg;
}
const PeeringFacade& peering_state() const {
return *static_cast<const S*>(this) \
->template context<BackfillMachine>().peering_state;
}
const BackfillState& backfill_state() const {
return static_cast<const S*>(this) \
->template context<BackfillMachine>().backfill_state;
}
};
public:
// states
struct Crashed : sc::simple_state<Crashed, BackfillMachine>,
StateHelper<Crashed> {
explicit Crashed();
};
struct Initial : sc::state<Initial, BackfillMachine>,
StateHelper<Initial> {
using reactions = boost::mpl::list<
sc::custom_reaction<Triggered>,
sc::transition<sc::event_base, Crashed>>;
explicit Initial(my_context);
// initialize after triggering backfill by on_activate_complete().
// transit to Enqueuing.
sc::result react(const Triggered&);
};
struct Enqueuing : sc::state<Enqueuing, BackfillMachine>,
StateHelper<Enqueuing> {
using reactions = boost::mpl::list<
sc::transition<RequestPrimaryScanning, PrimaryScanning>,
sc::transition<RequestReplicasScanning, ReplicasScanning>,
sc::transition<RequestWaiting, Waiting>,
sc::transition<RequestDone, Done>,
sc::transition<sc::event_base, Crashed>>;
explicit Enqueuing(my_context);
// indicate whether there is any remaining work to do when it comes
// to comparing the hobject_t namespace between primary and replicas.
// true doesn't necessarily mean backfill is done -- there could be
// in-flight pushes or drops which had been enqueued but aren't
// completed yet.
static bool all_enqueued(
const PeeringFacade& peering_state,
const BackfillInterval& backfill_info,
const std::map<pg_shard_t, BackfillInterval>& peer_backfill_info);
private:
void maybe_update_range();
void trim_backfill_infos();
// these methods take BackfillIntervals instead of extracting them from
// the state to emphasize the relationships across the main loop.
bool all_emptied(
const BackfillInterval& local_backfill_info,
const std::map<pg_shard_t, BackfillInterval>& peer_backfill_info) const;
hobject_t earliest_peer_backfill(
const std::map<pg_shard_t, BackfillInterval>& peer_backfill_info) const;
bool should_rescan_replicas(
const std::map<pg_shard_t, BackfillInterval>& peer_backfill_info,
const BackfillInterval& backfill_info) const;
    // indicate whether a particular acting primary needs to be scanned again
    // to process the next piece of the hobject_t namespace.
    // the logic is analogous to replica_needs_scan(); see the comments there.
bool should_rescan_primary(
const std::map<pg_shard_t, BackfillInterval>& peer_backfill_info,
const BackfillInterval& backfill_info) const;
    // the result_t is an intermediary between {remove,update}_on_peers() and
// updating BackfillIntervals in trim_backfilled_object_from_intervals.
// This step is important because it affects the main loop's condition,
// and thus deserves to be exposed instead of being called deeply from
// {remove,update}_on_peers().
struct [[nodiscard]] result_t {
std::set<pg_shard_t> pbi_targets;
hobject_t new_last_backfill_started;
};
void trim_backfilled_object_from_intervals(
result_t&&,
hobject_t& last_backfill_started,
std::map<pg_shard_t, BackfillInterval>& peer_backfill_info);
result_t remove_on_peers(const hobject_t& check);
result_t update_on_peers(const hobject_t& check);
};
struct PrimaryScanning : sc::state<PrimaryScanning, BackfillMachine>,
StateHelper<PrimaryScanning> {
using reactions = boost::mpl::list<
sc::custom_reaction<ObjectPushed>,
sc::custom_reaction<PrimaryScanned>,
sc::transition<sc::event_base, Crashed>>;
explicit PrimaryScanning(my_context);
sc::result react(ObjectPushed);
// collect scanning result and transit to Enqueuing.
sc::result react(PrimaryScanned);
};
struct ReplicasScanning : sc::state<ReplicasScanning, BackfillMachine>,
StateHelper<ReplicasScanning> {
using reactions = boost::mpl::list<
sc::custom_reaction<ObjectPushed>,
sc::custom_reaction<ReplicaScanned>,
sc::transition<sc::event_base, Crashed>>;
explicit ReplicasScanning(my_context);
// collect scanning result; if all results are collected, transition
// to Enqueuing will happen.
sc::result react(ObjectPushed);
sc::result react(ReplicaScanned);
// indicate whether a particular peer should be scanned to retrieve
    // a BackfillInterval for a new range of the hobject_t namespace.
    // true when bi.objects is exhausted, the replica bi's end is not MAX,
    // and the primary bi's begin is further than the replica's.
static bool replica_needs_scan(
const BackfillInterval& replica_backfill_info,
const BackfillInterval& local_backfill_info);
private:
std::set<pg_shard_t> waiting_on_backfill;
};
struct Waiting : sc::state<Waiting, BackfillMachine>,
StateHelper<Waiting> {
using reactions = boost::mpl::list<
sc::custom_reaction<ObjectPushed>,
sc::transition<sc::event_base, Crashed>>;
explicit Waiting(my_context);
sc::result react(ObjectPushed);
};
struct Done : sc::state<Done, BackfillMachine>,
StateHelper<Done> {
using reactions = boost::mpl::list<
sc::transition<sc::event_base, Crashed>>;
explicit Done(my_context);
};
BackfillState(BackfillListener& backfill_listener,
std::unique_ptr<PeeringFacade> peering_state,
std::unique_ptr<PGFacade> pg);
~BackfillState();
void process_event(
boost::intrusive_ptr<const sc::event_base> evt) {
backfill_machine.process_event(*std::move(evt));
}
hobject_t get_last_backfill_started() const {
return last_backfill_started;
}
private:
hobject_t last_backfill_started;
BackfillInterval backfill_info;
std::map<pg_shard_t, BackfillInterval> peer_backfill_info;
BackfillMachine backfill_machine;
std::unique_ptr<ProgressTracker> progress_tracker;
};
// BackfillListener -- an interface used by the backfill FSM to request
// low-level services like issuing `MOSDPGPush` or `MOSDPGBackfillRemove`.
// The goals behind the interface are: 1) unit-testability; 2) the possibility
// to retrofit the classical OSD with BackfillState. For the second reason we
// never use `seastar::future` -- instead responses to the requests are
// conveyed as events; see ObjectPushed as an example.
struct BackfillState::BackfillListener {
virtual void request_replica_scan(
const pg_shard_t& target,
const hobject_t& begin,
const hobject_t& end) = 0;
virtual void request_primary_scan(
const hobject_t& begin) = 0;
virtual void enqueue_push(
const hobject_t& obj,
const eversion_t& v) = 0;
virtual void enqueue_drop(
const pg_shard_t& target,
const hobject_t& obj,
const eversion_t& v) = 0;
virtual void maybe_flush() = 0;
virtual void update_peers_last_backfill(
const hobject_t& new_last_backfill) = 0;
virtual bool budget_available() const = 0;
virtual void backfilled() = 0;
virtual ~BackfillListener() = default;
};
// PeeringFacade -- a facade (in the GoF-defined meaning) simplifying
// the interface of PeeringState. The motivation is to have an inventory
// of behaviour that must be provided by a unit test's mock.
struct BackfillState::PeeringFacade {
virtual hobject_t earliest_backfill() const = 0;
virtual const std::set<pg_shard_t>& get_backfill_targets() const = 0;
virtual const hobject_t& get_peer_last_backfill(pg_shard_t peer) const = 0;
virtual const eversion_t& get_last_update() const = 0;
virtual const eversion_t& get_log_tail() const = 0;
// the performance impact of `std::function` has not been considered yet.
// If there is any proof (from e.g. profiling) about its significance, we
// can switch back to the template variant.
using scan_log_func_t = std::function<void(const pg_log_entry_t&)>;
virtual void scan_log_after(eversion_t, scan_log_func_t) const = 0;
virtual bool is_backfill_target(pg_shard_t peer) const = 0;
virtual void update_complete_backfill_object_stats(const hobject_t &hoid,
const pg_stat_t &stats) = 0;
virtual bool is_backfilling() const = 0;
virtual ~PeeringFacade() {}
};
// PGFacade -- a facade (in the GoF-defined meaning) simplifying the huge
// interface of crimson's PG class. The motivation is to have an inventory
// of behaviour that must be provided by a unit test's mock.
struct BackfillState::PGFacade {
virtual const eversion_t& get_projected_last_update() const = 0;
virtual ~PGFacade() {}
};
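// Testing sketch (illustrative only): the two facades above exist so a unit
// test can supply small mocks instead of the full PeeringState/PG machinery.
// Assuming PGFacade has no pure virtuals beyond the one declared above, a
// hypothetical mock could be as small as:
//
//   struct MockPGFacade : BackfillState::PGFacade {
//     eversion_t projected_last_update;
//     const eversion_t& get_projected_last_update() const override {
//       return projected_last_update;
//     }
//   };
//
// A PeeringFacade mock would follow the same pattern, stubbing each pure
// virtual with canned values driven by the test scenario.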
class BackfillState::ProgressTracker {
// TODO: apply_stat,
enum class op_stage_t {
enqueued_push,
enqueued_drop,
completed_push,
};
struct registry_item_t {
op_stage_t stage;
std::optional<pg_stat_t> stats;
};
BackfillMachine& backfill_machine;
std::map<hobject_t, registry_item_t> registry;
BackfillState& backfill_state() {
return backfill_machine.backfill_state;
}
PeeringFacade& peering_state() {
return *backfill_machine.peering_state;
}
BackfillListener& backfill_listener() {
return backfill_machine.backfill_listener;
}
public:
ProgressTracker(BackfillMachine& backfill_machine)
: backfill_machine(backfill_machine) {
}
bool tracked_objects_completed() const;
bool enqueue_push(const hobject_t&);
void enqueue_drop(const hobject_t&);
void complete_to(const hobject_t&, const pg_stat_t&);
};
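// Lifecycle sketch (illustrative only): for every object the FSM either
// enqueues a push or a drop; pushes are later acknowledged via complete_to(),
// and tracked_objects_completed() reports whether anything is still in
// flight. A hypothetical sequence:
//
//   tracker.enqueue_push(obj);        // registers obj as op_stage_t::enqueued_push
//   /* ... the corresponding ObjectPushed arrives ... */
//   tracker.complete_to(obj, stats);  // marks obj completed_push, records stats
//   if (tracker.tracked_objects_completed()) {
//     // nothing tracked is pending any more
//   }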
} // namespace crimson::osd
| 12,813 | 32.456919 | 78 | h |
null | ceph-main/src/crimson/osd/ec_backend.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/intrusive_ptr.hpp>
#include <seastar/core/future.hh>
#include "include/buffer_fwd.h"
#include "osd/osd_types.h"
#include "pg_backend.h"
class ECBackend : public PGBackend
{
public:
ECBackend(shard_id_t shard,
CollectionRef coll,
crimson::osd::ShardServices& shard_services,
const ec_profile_t& ec_profile,
uint64_t stripe_width,
DoutPrefixProvider &dpp);
seastar::future<> stop() final {
return seastar::now();
}
void on_actingset_changed(bool same_primary) final {}
private:
ll_read_ierrorator::future<ceph::bufferlist>
_read(const hobject_t& hoid, uint64_t off, uint64_t len, uint32_t flags) override;
rep_op_fut_t
_submit_transaction(std::set<pg_shard_t>&& pg_shards,
const hobject_t& hoid,
ceph::os::Transaction&& txn,
osd_op_params_t&& req,
epoch_t min_epoch, epoch_t max_epoch,
std::vector<pg_log_entry_t>&& log_entries) final;
CollectionRef coll;
crimson::os::FuturizedStore::Shard* store;
seastar::future<> request_committed(const osd_reqid_t& reqid,
const eversion_t& version) final {
return seastar::now();
}
};
| 1,260 | 29.02381 | 84 | h |
null | ceph-main/src/crimson/osd/exceptions.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <exception>
#include <system_error>
#include "crimson/common/errorator.h"
namespace crimson::osd {
class error : private std::system_error {
public:
error(const std::errc ec)
: system_error(std::make_error_code(ec)) {
}
using system_error::code;
using system_error::what;
friend error make_error(int ret);
private:
error(const int ret) noexcept
: system_error(ret, std::system_category()) {
}
};
inline error make_error(const int ret) {
return error{ret};
}
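// Usage sketch (illustrative only, hypothetical call sites): make_error()
// wraps a plain errno-style return code, while the structs below cover the
// common cases directly:
//
//   auto err    = crimson::osd::make_error(EINVAL);      // generic errno wrapper
//   auto enoent = crimson::osd::object_not_found{};      // no_such_file_or_directory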
struct object_not_found : public error {
object_not_found() : error(std::errc::no_such_file_or_directory) {}
};
struct invalid_argument : public error {
invalid_argument() : error(std::errc::invalid_argument) {}
};
// FIXME: error handling
struct permission_denied : public error {
permission_denied() : error(std::errc::operation_not_permitted) {}
};
} // namespace crimson::osd
| 1,000 | 20.297872 | 70 | h |
null | ceph-main/src/crimson/osd/heartbeat.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cstdint>
#include <seastar/core/future.hh>
#include "common/ceph_time.h"
#include "crimson/common/gated.h"
#include "crimson/net/Dispatcher.h"
#include "crimson/net/Fwd.h"
class MOSDPing;
namespace crimson::osd {
class ShardServices;
}
namespace crimson::mon {
class Client;
}
template<typename Message> using Ref = boost::intrusive_ptr<Message>;
class Heartbeat : public crimson::net::Dispatcher {
public:
using osd_id_t = int;
Heartbeat(osd_id_t whoami,
crimson::osd::ShardServices& service,
crimson::mon::Client& monc,
crimson::net::Messenger &front_msgr,
crimson::net::Messenger &back_msgr);
seastar::future<> start(entity_addrvec_t front,
entity_addrvec_t back);
seastar::future<> stop();
using osds_t = std::vector<osd_id_t>;
void add_peer(osd_id_t peer, epoch_t epoch);
void update_peers(int whoami);
void remove_peer(osd_id_t peer);
osds_t get_peers() const;
const entity_addrvec_t& get_front_addrs() const;
const entity_addrvec_t& get_back_addrs() const;
crimson::net::Messenger &get_front_msgr() const;
crimson::net::Messenger &get_back_msgr() const;
// Dispatcher methods
std::optional<seastar::future<>> ms_dispatch(
crimson::net::ConnectionRef conn, MessageRef m) override;
void ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace) override;
void ms_handle_connect(crimson::net::ConnectionRef conn, seastar::shard_id) override;
void ms_handle_accept(crimson::net::ConnectionRef conn, seastar::shard_id, bool is_replace) override;
void print(std::ostream&) const;
private:
seastar::future<> handle_osd_ping(crimson::net::ConnectionRef conn,
Ref<MOSDPing> m);
seastar::future<> handle_ping(crimson::net::ConnectionRef conn,
Ref<MOSDPing> m);
seastar::future<> handle_reply(crimson::net::ConnectionRef conn,
Ref<MOSDPing> m);
seastar::future<> handle_you_died();
/// remove down OSDs
/// @return peers not added in this epoch
osds_t remove_down_peers();
/// add enough reporters for fast failure detection
void add_reporter_peers(int whoami);
seastar::future<> start_messenger(crimson::net::Messenger& msgr,
const entity_addrvec_t& addrs);
seastar::future<> maybe_share_osdmap(crimson::net::ConnectionRef,
Ref<MOSDPing> m);
private:
const osd_id_t whoami;
crimson::osd::ShardServices& service;
crimson::mon::Client& monc;
crimson::net::Messenger &front_msgr;
crimson::net::Messenger &back_msgr;
seastar::timer<seastar::lowres_clock> timer;
// use real_clock so it can be converted to utime_t
using clock = ceph::coarse_real_clock;
class ConnectionListener;
class Connection;
class Session;
class Peer;
using peers_map_t = std::map<osd_id_t, Peer>;
peers_map_t peers;
// osds which are considered failed
  // osd_id => the last time that both front and back pings were acked
  //           or sent.
  // used for calculating how long the OSD has been unresponsive
using failure_queue_t = std::map<osd_id_t, clock::time_point>;
seastar::future<> send_failures(failure_queue_t&& failure_queue);
seastar::future<> send_heartbeats();
void heartbeat_check();
  // osds we've reported to the monitor as failed, but which are not marked
  // down yet
crimson::common::Gated gate;
class FailingPeers {
public:
FailingPeers(Heartbeat& heartbeat) : heartbeat(heartbeat) {}
bool add_pending(osd_id_t peer,
clock::time_point failed_since,
clock::time_point now,
std::vector<seastar::future<>>& futures);
seastar::future<> cancel_one(osd_id_t peer);
private:
seastar::future<> send_still_alive(osd_id_t, const entity_addrvec_t&);
Heartbeat& heartbeat;
struct failure_info_t {
clock::time_point failed_since;
entity_addrvec_t addrs;
};
std::map<osd_id_t, failure_info_t> failure_pending;
} failing_peers;
};
inline std::ostream& operator<<(std::ostream& out, const Heartbeat& hb) {
hb.print(out);
return out;
}
/*
 * Event-driven interface for Heartbeat::Peer to be notified when both hb_front
 * and hb_back are connected, or when either connection is lost.
*/
class Heartbeat::ConnectionListener {
public:
ConnectionListener(size_t connections) : connections{connections} {}
void increase_connected() {
assert(connected < connections);
++connected;
if (connected == connections) {
on_connected();
}
}
void decrease_connected() {
assert(connected > 0);
if (connected == connections) {
on_disconnected();
}
--connected;
}
enum class type_t { front, back };
virtual entity_addr_t get_peer_addr(type_t) = 0;
protected:
virtual void on_connected() = 0;
virtual void on_disconnected() = 0;
private:
const size_t connections;
size_t connected = 0;
};
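// Usage sketch (illustrative only): Heartbeat::Peer (below) presumably
// constructs its ConnectionListener base with connections = 2 (front + back);
// each Connection then reports its own state transitions, and the composite
// callbacks fire only at the edges:
//
//   listener.increase_connected(); // front up: 1/2, no callback yet
//   listener.increase_connected(); // back up:  2/2, on_connected() fires
//   listener.decrease_connected(); // either drops: on_disconnected() fires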
class Heartbeat::Connection {
public:
using type_t = ConnectionListener::type_t;
Connection(osd_id_t peer, bool is_winner_side, type_t type,
crimson::net::Messenger& msgr,
ConnectionListener& listener)
: peer{peer}, type{type},
msgr{msgr}, listener{listener},
is_winner_side{is_winner_side} {
connect();
}
Connection(const Connection&) = delete;
Connection(Connection&&) = delete;
Connection& operator=(const Connection&) = delete;
Connection& operator=(Connection&&) = delete;
~Connection();
bool matches(crimson::net::ConnectionRef _conn) const;
void connected() {
set_connected();
}
bool accepted(crimson::net::ConnectionRef, bool is_replace);
void reset(bool is_replace=false);
seastar::future<> send(MessageURef msg);
void validate();
// retry connection if still pending
void retry();
private:
void set_connected();
void set_unconnected();
void connect();
const osd_id_t peer;
const type_t type;
crimson::net::Messenger& msgr;
ConnectionListener& listener;
/*
* Resolve the following racing when both me and peer are trying to connect
* each other symmetrically, under SocketPolicy::lossy_client:
*
* OSD.A OSD.B
* - -
* |-[1]----> <----[2]-|
* \ /
* \ /
* delay.. X delay..
* / \
* |-[1]x> / \ <x[2]-|
* |<-[2]--- ---[1]->|
* |(reset#1) (reset#2)|
* |(reconnectB) (reconnectA)|
* |-[2]---> <---[1]-|
* delay.. delay..
* (remote close populated)
* |-[2]x> <x[1]-|
* |(reset#2) (reset#1)|
* | ... ... |
* (dead loop!)
*
   * Our solution is to remember whether such racing happened recently, and to
   * establish the connection asymmetrically, only from the winner side whose
   * osd-id is larger.
*/
const bool is_winner_side;
bool racing_detected = false;
crimson::net::ConnectionRef conn;
bool is_connected = false;
friend std::ostream& operator<<(std::ostream& os, const Connection& c) {
if (c.type == type_t::front) {
return os << "con_front(osd." << c.peer << ")";
} else {
return os << "con_back(osd." << c.peer << ")";
}
}
};
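// Resolution sketch (illustrative only): the asymmetry described above hinges
// on is_winner_side. Per the comment, the winner is the side with the larger
// osd-id, so the owner would construct its connections roughly like this
// (the exact expression is an assumption of this sketch, not taken from the
// header):
//
//   const bool is_winner = whoami > peer;
//   Connection con_front{peer, is_winner, Connection::type_t::front,
//                        front_msgr, listener};
//   Connection con_back{peer, is_winner, Connection::type_t::back,
//                       back_msgr, listener};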
/*
 * Track the ping history and ping replies (the pongs) within the same session;
 * clean up the history once hb_front or hb_back loses its connection, and
 * restart the session once both connections are established again.
*
* We cannot simply remove the entire Heartbeat::Peer once hb_front or hb_back
* loses connection, because we would end up with the following deadloop:
*
* OSD.A OSD.B
* - -
* hb_front reset <--(network)--- hb_front close
* | ^
* | |
* remove Peer B (dead loop!) remove Peer A
* | |
* V |
* hb_back close ----(network)---> hb_back reset
*/
class Heartbeat::Session {
public:
Session(osd_id_t peer) : peer{peer} {}
void set_epoch_added(epoch_t epoch_) { epoch = epoch_; }
epoch_t get_epoch_added() const { return epoch; }
void set_projected_epoch(epoch_t epoch_) { projected_epoch = epoch_; }
epoch_t get_projected_epoch() const { return projected_epoch; }
bool is_started() const { return connected; }
bool pinged() const {
if (clock::is_zero(first_tx)) {
// i can never receive a pong without sending any ping message first.
assert(clock::is_zero(last_rx_front) &&
clock::is_zero(last_rx_back));
return false;
} else {
return true;
}
}
enum class health_state {
UNKNOWN,
UNHEALTHY,
HEALTHY,
};
health_state do_health_screen(clock::time_point now) const {
if (!pinged()) {
      // we are neither healthy nor unhealthy because we haven't sent anything yet
return health_state::UNKNOWN;
} else if (!ping_history.empty() && ping_history.begin()->second.deadline < now) {
return health_state::UNHEALTHY;
} else if (!clock::is_zero(last_rx_front) &&
!clock::is_zero(last_rx_back)) {
      // only declare healthy once we have received the first
      // replies from both front/back connections
return health_state::HEALTHY;
} else {
return health_state::UNKNOWN;
}
}
clock::time_point failed_since(clock::time_point now) const;
void set_tx(clock::time_point now) {
if (!pinged()) {
first_tx = now;
}
last_tx = now;
}
void on_connected() {
assert(!connected);
connected = true;
ping_history.clear();
}
void on_ping(const utime_t& sent_stamp,
const clock::time_point& deadline) {
assert(connected);
[[maybe_unused]] auto [reply, added] =
ping_history.emplace(sent_stamp, reply_t{deadline, 2});
}
bool on_pong(const utime_t& ping_stamp,
Connection::type_t type,
clock::time_point now) {
assert(connected);
auto ping = ping_history.find(ping_stamp);
if (ping == ping_history.end()) {
// old replies, deprecated by newly sent pings.
return false;
}
auto& unacked = ping->second.unacknowledged;
assert(unacked);
if (type == Connection::type_t::front) {
last_rx_front = now;
unacked--;
} else {
last_rx_back = now;
unacked--;
}
if (unacked == 0) {
ping_history.erase(ping_history.begin(), ++ping);
}
return true;
}
void on_disconnected() {
assert(connected);
connected = false;
if (!ping_history.empty()) {
      // we lost the ping_history of the last session, but still need to keep
      // the oldest deadline for the unhealthy check.
auto oldest = ping_history.begin();
auto sent_stamp = oldest->first;
auto deadline = oldest->second.deadline;
ping_history.clear();
ping_history.emplace(sent_stamp, reply_t{deadline, 0});
}
}
// maintain an entry in ping_history for unhealthy check
void set_inactive_history(clock::time_point);
private:
const osd_id_t peer;
bool connected = false;
// time we sent our first ping request
clock::time_point first_tx;
// last time we sent a ping request
clock::time_point last_tx;
// last time we got a ping reply on the front side
clock::time_point last_rx_front;
// last time we got a ping reply on the back side
clock::time_point last_rx_back;
// most recent epoch we wanted this peer
epoch_t epoch; // rename me to epoch_added
// epoch we expect peer to be at once our sent incrementals are processed
epoch_t projected_epoch = 0;
struct reply_t {
clock::time_point deadline;
// one sent over front conn, another sent over back conn
uint8_t unacknowledged = 0;
};
// history of inflight pings, arranging by timestamp we sent
std::map<utime_t, reply_t> ping_history;
};
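// Lifecycle sketch (illustrative only): once both connections are up the
// owner marks the session connected, then records each heartbeat it sends
// and decrements the outstanding count as pongs arrive on either side:
//
//   session.on_connected();                // both front and back are up
//   session.set_tx(now);
//   session.on_ping(sent_stamp, deadline); // expect 2 acks for this stamp
//   session.on_pong(sent_stamp, Connection::type_t::front, now); // 1 left
//   session.on_pong(sent_stamp, Connection::type_t::back, now);  // entry erased
//   auto state = session.do_health_screen(now); // HEALTHY once both replied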
class Heartbeat::Peer final : private Heartbeat::ConnectionListener {
public:
Peer(Heartbeat&, osd_id_t);
~Peer();
Peer(Peer&&) = delete;
Peer(const Peer&) = delete;
Peer& operator=(Peer&&) = delete;
Peer& operator=(const Peer&) = delete;
// set/get the epoch at which the peer was added
void set_epoch_added(epoch_t epoch) { session.set_epoch_added(epoch); }
epoch_t get_epoch_added() const { return session.get_epoch_added(); }
void set_projected_epoch(epoch_t epoch) { session.set_projected_epoch(epoch); }
epoch_t get_projected_epoch() const { return session.get_projected_epoch(); }
// if failure, return time_point since last active
// else, return clock::zero()
clock::time_point failed_since(clock::time_point now) const {
return session.failed_since(now);
}
void send_heartbeat(
clock::time_point, ceph::signedspan, std::vector<seastar::future<>>&);
seastar::future<> handle_reply(crimson::net::ConnectionRef, Ref<MOSDPing>);
void handle_reset(crimson::net::ConnectionRef conn, bool is_replace);
void handle_connect(crimson::net::ConnectionRef conn);
void handle_accept(crimson::net::ConnectionRef conn, bool is_replace);
private:
entity_addr_t get_peer_addr(type_t type) override;
void on_connected() override;
void on_disconnected() override;
void do_send_heartbeat(
clock::time_point, ceph::signedspan, std::vector<seastar::future<>>*);
template <typename Func>
void for_each_conn(Func&& f) {
f(con_front);
f(con_back);
}
Heartbeat& heartbeat;
const osd_id_t peer;
Session session;
  // whether a heartbeat still needs to be sent once the session is connected
bool pending_send = false;
Connection con_front;
Connection con_back;
friend std::ostream& operator<<(std::ostream& os, const Peer& p) {
return os << "peer(osd." << p.peer << ")";
}
};
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<Heartbeat> : fmt::ostream_formatter {};
template <> struct fmt::formatter<Heartbeat::Connection> : fmt::ostream_formatter {};
template <> struct fmt::formatter<Heartbeat::Peer> : fmt::ostream_formatter {};
#endif
| 14,078 | 29.474026 | 103 | h |
null | ceph-main/src/crimson/osd/main_config_bootstrap_helpers.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <sys/types.h>
#include <unistd.h>
#include <iostream>
#include <fstream>
#include <random>
#include <seastar/core/future.hh>
#include "common/ceph_argparse.h"
#include "include/expected.hpp"
#include "include/random.h"
namespace crimson::osd {
void usage(const char* prog);
inline uint64_t get_nonce()
{
return ceph::util::generate_random_number<uint64_t>();
}
seastar::future<> populate_config_from_mon();
struct early_config_t {
std::vector<std::string> early_args;
std::vector<std::string> ceph_args;
std::string cluster_name{"ceph"};
std::string conf_file_list;
CephInitParameters init_params{CEPH_ENTITY_TYPE_OSD};
  /// The returned vector of C strings must not outlive the input vector 'in'
auto to_ptr_vector(const std::vector<std::string> &in) {
std::vector<const char *> ret;
ret.reserve(in.size());
std::transform(
std::begin(in), std::end(in),
std::back_inserter(ret),
[](const auto &str) { return str.c_str(); });
return ret;
}
std::vector<const char *> get_early_args() {
return to_ptr_vector(early_args);
}
std::vector<const char *> get_ceph_args() {
return to_ptr_vector(ceph_args);
}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(early_args, bl);
encode(ceph_args, bl);
encode(cluster_name, bl);
encode(conf_file_list, bl);
encode(init_params, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(early_args, bl);
decode(ceph_args, bl);
decode(cluster_name, bl);
decode(conf_file_list, bl);
decode(init_params, bl);
DECODE_FINISH(bl);
}
};
/**
* get_early_config
*
* Compile initial configuration information from command line arguments,
* config files, and monitors.
*
* This implementation forks off a worker process to do this work and must
* therefore be called very early in main(). (See implementation for an
* explanation).
*/
tl::expected<early_config_t, int>
get_early_config(int argc, const char *argv[]);
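// Usage sketch (illustrative only): a caller in main() would typically run
// this before any seastar initialization, e.g. (the error handling shown is
// an assumption of this sketch):
//
//   int main(int argc, const char* argv[]) {
//     auto early = crimson::osd::get_early_config(argc, argv);
//     if (!early.has_value()) {
//       return 1; // or propagate early.error()
//     }
//     auto early_args = early->get_early_args(); // must not outlive *early
//     // ... hand early_args over to the seastar application ...
//   }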
}
WRITE_CLASS_ENCODER(crimson::osd::early_config_t)
| 2,214 | 22.56383 | 74 | h |
null | ceph-main/src/crimson/osd/object_context.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <map>
#include <optional>
#include <utility>
#include <seastar/core/shared_future.hh>
#include <seastar/core/shared_ptr.hh>
#include "common/intrusive_lru.h"
#include "osd/object_state.h"
#include "crimson/common/exception.h"
#include "crimson/common/tri_mutex.h"
#include "crimson/osd/osd_operation.h"
namespace ceph {
class Formatter;
}
namespace crimson::common {
class ConfigProxy;
}
namespace crimson::osd {
class Watch;
struct SnapSetContext;
using SnapSetContextRef = boost::intrusive_ptr<SnapSetContext>;
template <typename OBC>
struct obc_to_hoid {
using type = hobject_t;
const type &operator()(const OBC &obc) {
return obc.obs.oi.soid;
}
};
struct SnapSetContext :
public boost::intrusive_ref_counter<SnapSetContext,
boost::thread_unsafe_counter>
{
hobject_t oid;
SnapSet snapset;
bool exists = false;
/**
* exists
*
   * Because ObjectContexts are cached, we need to be able to express the case
   * where the object to which a cached ObjectContext refers does not exist.
   * ObjectContexts for yet-to-be-created objects are initialized with exists=false.
* The ObjectContext for a deleted object will have exists set to false until it falls
* out of cache (or another write recreates the object).
*/
explicit SnapSetContext(const hobject_t& o) :
oid(o), exists(false) {}
};
class ObjectContext : public ceph::common::intrusive_lru_base<
ceph::common::intrusive_lru_config<
hobject_t, ObjectContext, obc_to_hoid<ObjectContext>>>
{
public:
ObjectState obs;
SnapSetContextRef ssc;
  // the watch / notify machinery largely stays away from the hot,
  // frequently taken paths. std::map is used mostly for developer
  // convenience.
using watch_key_t = std::pair<uint64_t, entity_name_t>;
std::map<watch_key_t, seastar::shared_ptr<crimson::osd::Watch>> watchers;
ObjectContext(hobject_t hoid) : obs(std::move(hoid)) {}
const hobject_t &get_oid() const {
return obs.oi.soid;
}
bool is_head() const {
return get_oid().is_head();
}
hobject_t get_head_oid() const {
return get_oid().get_head();
}
const SnapSet &get_head_ss() const {
ceph_assert(is_head());
ceph_assert(ssc);
return ssc->snapset;
}
void set_head_state(ObjectState &&_obs, SnapSetContextRef &&_ssc) {
ceph_assert(is_head());
obs = std::move(_obs);
ssc = std::move(_ssc);
}
void set_clone_state(ObjectState &&_obs) {
ceph_assert(!is_head());
obs = std::move(_obs);
}
/// pass the provided exception to any waiting consumers of this ObjectContext
template<typename Exception>
void interrupt(Exception ex) {
lock.abort(std::move(ex));
if (recovery_read_marker) {
drop_recovery_read();
}
}
private:
tri_mutex lock;
bool recovery_read_marker = false;
template <typename Lock, typename Func>
auto _with_lock(Lock&& lock, Func&& func) {
Ref obc = this;
return lock.lock().then([&lock, func = std::forward<Func>(func), obc]() mutable {
return seastar::futurize_invoke(func).finally([&lock, obc] {
lock.unlock();
});
});
}
boost::intrusive::list_member_hook<> list_hook;
uint64_t list_link_cnt = 0;
public:
template <typename ListType>
void append_to(ListType& list) {
if (list_link_cnt++ == 0) {
list.push_back(*this);
}
}
template <typename ListType>
void remove_from(ListType&& list) {
assert(list_link_cnt > 0);
if (--list_link_cnt == 0) {
list.erase(std::decay_t<ListType>::s_iterator_to(*this));
}
}
using obc_accessing_option_t = boost::intrusive::member_hook<
ObjectContext,
boost::intrusive::list_member_hook<>,
&ObjectContext::list_hook>;
template<RWState::State Type, typename InterruptCond = void, typename Func>
auto with_lock(Func&& func) {
if constexpr (!std::is_void_v<InterruptCond>) {
auto wrapper = ::crimson::interruptible::interruptor<InterruptCond>::wrap_function(std::forward<Func>(func));
switch (Type) {
case RWState::RWWRITE:
return _with_lock(lock.for_write(), std::move(wrapper));
case RWState::RWREAD:
return _with_lock(lock.for_read(), std::move(wrapper));
case RWState::RWEXCL:
return _with_lock(lock.for_excl(), std::move(wrapper));
case RWState::RWNONE:
return seastar::futurize_invoke(std::move(wrapper));
default:
assert(0 == "noop");
}
} else {
switch (Type) {
case RWState::RWWRITE:
return _with_lock(lock.for_write(), std::forward<Func>(func));
case RWState::RWREAD:
return _with_lock(lock.for_read(), std::forward<Func>(func));
case RWState::RWEXCL:
return _with_lock(lock.for_excl(), std::forward<Func>(func));
case RWState::RWNONE:
return seastar::futurize_invoke(std::forward<Func>(func));
default:
assert(0 == "noop");
}
}
}
template<RWState::State Type, typename InterruptCond = void, typename Func>
auto with_promoted_lock(Func&& func) {
if constexpr (!std::is_void_v<InterruptCond>) {
auto wrapper = ::crimson::interruptible::interruptor<InterruptCond>::wrap_function(std::forward<Func>(func));
switch (Type) {
case RWState::RWWRITE:
return _with_lock(lock.excl_from_write(), std::move(wrapper));
case RWState::RWREAD:
return _with_lock(lock.excl_from_read(), std::move(wrapper));
case RWState::RWEXCL:
return _with_lock(lock.excl_from_excl(), std::move(wrapper));
case RWState::RWNONE:
return _with_lock(lock.for_excl(), std::move(wrapper));
default:
assert(0 == "noop");
}
} else {
switch (Type) {
case RWState::RWWRITE:
return _with_lock(lock.excl_from_write(), std::forward<Func>(func));
case RWState::RWREAD:
return _with_lock(lock.excl_from_read(), std::forward<Func>(func));
case RWState::RWEXCL:
return _with_lock(lock.excl_from_excl(), std::forward<Func>(func));
case RWState::RWNONE:
return _with_lock(lock.for_excl(), std::forward<Func>(func));
default:
assert(0 == "noop");
}
}
}
bool empty() const {
return !lock.is_acquired();
}
bool is_request_pending() const {
return lock.is_acquired();
}
bool get_recovery_read() {
if (lock.try_lock_for_read()) {
recovery_read_marker = true;
return true;
} else {
return false;
}
}
void wait_recovery_read() {
assert(lock.get_readers() > 0);
recovery_read_marker = true;
}
void drop_recovery_read() {
assert(recovery_read_marker);
recovery_read_marker = false;
}
bool maybe_get_excl() {
return lock.try_lock_for_excl();
}
};
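// Usage sketch (illustrative only): callers normally take the lock through
// with_lock() so that the tri_mutex is released when the returned future
// resolves, e.g. (hypothetical lambda body):
//
//   obc->with_lock<RWState::RWREAD>([obc] {
//     // inspect obc->obs / obc->ssc while the shared read lock is held
//     return seastar::now();
//   });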
using ObjectContextRef = ObjectContext::Ref;
class ObjectContextRegistry : public md_config_obs_t {
ObjectContext::lru_t obc_lru;
public:
ObjectContextRegistry(crimson::common::ConfigProxy &conf);
~ObjectContextRegistry();
std::pair<ObjectContextRef, bool> get_cached_obc(const hobject_t &hoid) {
return obc_lru.get_or_create(hoid);
}
ObjectContextRef maybe_get_cached_obc(const hobject_t &hoid) {
return obc_lru.get(hoid);
}
void clear_range(const hobject_t &from,
const hobject_t &to) {
obc_lru.clear_range(from, to);
}
template <class F>
void for_each(F&& f) {
obc_lru.for_each(std::forward<F>(f));
}
const char** get_tracked_conf_keys() const final;
void handle_conf_change(const crimson::common::ConfigProxy& conf,
const std::set <std::string> &changed) final;
};
std::optional<hobject_t> resolve_oid(const SnapSet &ss,
const hobject_t &oid);
} // namespace crimson::osd
| 7,762 | 27.025271 | 115 | h |
null | ceph-main/src/crimson/osd/object_context_loader.h | #pragma once
#include <seastar/core/future.hh>
#include "crimson/common/errorator.h"
#include "crimson/osd/object_context.h"
#include "crimson/osd/pg_backend.h"
namespace crimson::osd {
class ObjectContextLoader {
public:
using obc_accessing_list_t = boost::intrusive::list<
ObjectContext,
ObjectContext::obc_accessing_option_t>;
ObjectContextLoader(
ObjectContextRegistry& _obc_services,
PGBackend& _backend,
DoutPrefixProvider& dpp)
: obc_registry{_obc_services},
backend{_backend},
dpp{dpp}
{}
using load_obc_ertr = crimson::errorator<
crimson::ct_error::enoent,
crimson::ct_error::object_corrupted>;
using load_obc_iertr =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
load_obc_ertr>;
using with_obc_func_t =
std::function<load_obc_iertr::future<> (ObjectContextRef)>;
using with_both_obc_func_t =
std::function<load_obc_iertr::future<> (ObjectContextRef, ObjectContextRef)>;
// Use this variant by default
template<RWState::State State>
load_obc_iertr::future<> with_obc(hobject_t oid,
with_obc_func_t&& func);
// Use this variant in the case where the head object
// obc is already locked and only the clone obc is needed.
// Avoid nesting with_head_obc() calls by using with_clone_obc()
// with an already locked head.
template<RWState::State State>
load_obc_iertr::future<> with_clone_obc_only(ObjectContextRef head,
hobject_t oid,
with_obc_func_t&& func);
// Use this variant in the case where both the head
// object *and* the matching clone object are being used
// in func.
template<RWState::State State>
load_obc_iertr::future<> with_head_and_clone_obc(
hobject_t oid,
with_both_obc_func_t&& func);
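  // Usage sketch (illustrative only): a caller passes a continuation that is
  // run with the obc locked in the requested mode; the continuation returns a
  // load_obc_iertr::future<> (construction of that future is elided here):
  //
  //   loader.with_obc<RWState::RWWRITE>(
  //     oid,
  //     [&](ObjectContextRef obc) {
  //       // mutate obc->obs under the write lock, then return a ready future
  //     });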
load_obc_iertr::future<> reload_obc(ObjectContext& obc) const;
void notify_on_change(bool is_primary);
private:
ObjectContextRegistry& obc_registry;
PGBackend& backend;
DoutPrefixProvider& dpp;
obc_accessing_list_t obc_set_accessing;
template<RWState::State State>
load_obc_iertr::future<> with_clone_obc(hobject_t oid,
with_obc_func_t&& func);
template<RWState::State State>
load_obc_iertr::future<> with_head_obc(ObjectContextRef obc,
bool existed,
with_obc_func_t&& func);
template<RWState::State State>
load_obc_iertr::future<ObjectContextRef>
get_or_load_obc(ObjectContextRef obc,
bool existed);
load_obc_iertr::future<ObjectContextRef>
load_obc(ObjectContextRef obc);
};
}
| 2,772 | 30.511364 | 81 | h |