diff --git a/applications/direct_hub/Makefile b/applications/direct_hub/Makefile new file mode 100644 index 000000000..f5cedde67 --- /dev/null +++ b/applications/direct_hub/Makefile @@ -0,0 +1,3 @@ +SUBDIRS = targets +-include config.mk +include $(OPENMRNPATH)/etc/recurse.mk diff --git a/applications/direct_hub/config.mk b/applications/direct_hub/config.mk new file mode 120000 index 000000000..e270c0389 --- /dev/null +++ b/applications/direct_hub/config.mk @@ -0,0 +1 @@ +../default_config.mk \ No newline at end of file diff --git a/applications/direct_hub/main.cxx b/applications/direct_hub/main.cxx new file mode 100644 index 000000000..35cee9f8f --- /dev/null +++ b/applications/direct_hub/main.cxx @@ -0,0 +1,240 @@ +/** \copyright + * Copyright (c) 2013, Balazs Racz + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * \file main.cxx + * + * An application which acts as an openlcb hub with the GC protocol, using the + * DirectHub infrastructure. 
+ *
+ * @author Balazs Racz
+ * @date 31 Dec 2023
+ */
+
+#include <fcntl.h>
+#include <getopt.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <memory>
+
+#include "executor/Executor.hxx"
+#include "executor/Service.hxx"
+#include "os/os.h"
+#include "utils/ClientConnection.hxx"
+#include "utils/DirectHub.hxx"
+#include "utils/HubDeviceSelect.hxx"
+#include "utils/SocketCan.hxx"
+#include "utils/constants.hxx"
+
+Executor<1> g_executor("g_executor", 0, 1024);
+Service g_service(&g_executor);
+
+std::unique_ptr<ByteDirectHubInterface> g_direct_hub {create_hub(&g_executor)};
+
+CanHubFlow can_hub0(&g_service);
+
+OVERRIDE_CONST(gc_generate_newlines, 1);
+OVERRIDE_CONST(gridconnect_tcp_snd_buffer_size, 8192);
+OVERRIDE_CONST(gridconnect_tcp_rcv_buffer_size, 8192);
+OVERRIDE_CONST(gridconnect_tcp_notsent_lowat_buffer_size, 1024);
+
+int port = 12021;
+const char *device_path = nullptr;
+const char *socket_can_path = nullptr;
+int upstream_port = 12021;
+const char *upstream_host = nullptr;
+bool timestamped = false;
+bool export_mdns = false;
+const char *mdns_name = "openmrn_hub";
+bool printpackets = false;
+
+void usage(const char *e)
+{
+    fprintf(stderr,
+        "Usage: %s [-p port] [-d device_path] [-u upstream_host] "
+        "[-q upstream_port] [-m] [-n mdns_name] "
+#if defined(__linux__)
+        "[-s socketcan_interface] "
+#endif
+        "[-t] [-l]\n\n",
+        e);
+    fprintf(stderr,
+        "GridConnect CAN HUB.\nListens to a specific TCP port, "
+        "reads CAN packets from the incoming connections using "
+        "the GridConnect protocol, and forwards all incoming "
+        "packets to all other participants.\n\nArguments:\n");
+    fprintf(stderr,
+        "\t-p port specifies the port number to listen on, "
+        "default is 12021.\n");
+    fprintf(stderr,
+        "\t-d device is a path to a physical device doing "
+        "serial-CAN or USB-CAN. If specified, opens device and "
+        "adds it to the hub.\n");
+#if defined(__linux__)
+    fprintf(stderr,
+        "\t-s socketcan_interface is a socketcan device (e.g. 'can0'). "
+        "If specified, opens device and adds it to the hub.\n");
+#endif
+    fprintf(stderr,
+        "\t-u upstream_host is the host name for an upstream "
+        "hub. If specified, this hub will connect to an upstream "
+        "hub.\n");
+    fprintf(stderr,
+        "\t-q upstream_port is the port number for the upstream hub.\n");
+    fprintf(stderr, "\t-t prints timestamps for each packet.\n");
+    fprintf(stderr, "\t-l print all packets.\n");
+#ifdef HAVE_AVAHI_CLIENT
+    fprintf(stderr, "\t-m exports the current service on mDNS.\n");
+    fprintf(
+        stderr, "\t-n mdns_name sets the exported mDNS name. Implies -m.\n");
+#endif
+    exit(1);
+}
+
+void parse_args(int argc, char *argv[])
+{
+    int opt;
+    while ((opt = getopt(argc, argv, "hp:d:s:u:q:tlmn:")) >= 0)
+    {
+        switch (opt)
+        {
+            case 'h':
+                usage(argv[0]);
+                break;
+            case 'd':
+                device_path = optarg;
+                break;
+#if defined(__linux__)
+            case 's':
+                socket_can_path = optarg;
+                break;
+#endif
+            case 'p':
+                port = atoi(optarg);
+                break;
+            case 'u':
+                upstream_host = optarg;
+                break;
+            case 'q':
+                upstream_port = atoi(optarg);
+                break;
+            case 't':
+                timestamped = true;
+                break;
+            case 'm':
+                export_mdns = true;
+                break;
+            case 'n':
+                mdns_name = optarg;
+                export_mdns = true;
+                break;
+            case 'l':
+                printpackets = true;
+                break;
+            default:
+                fprintf(stderr, "Unknown option %c\n", opt);
+                usage(argv[0]);
+        }
+    }
+}
+
+void create_legacy_bridge() {
+    static bool is_created = false;
+    if (!is_created) {
+        is_created = true;
+        create_gc_to_legacy_can_bridge(g_direct_hub.get(), &can_hub0);
+    }
+}
+
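// Editorial sketch (not part of the patch): the wiring that appl_main() below
// performs, reduced to its core calls. Only functions that already appear in
// this file are used; the upstream host name is a placeholder.
//
//   create_direct_gc_tcp_hub(g_direct_hub.get(), 12021);   // accept TCP clients
//   create_legacy_bridge();                                 // CAN-side bridge (optional)
//   std::unique_ptr<ConnectionClient> up(new UpstreamConnectionClient(
//       "upstream", g_direct_hub.get(), "example-upstream-host", 12021));
//   while (true) { up->ping(); sleep(1); }                  // reconnects as needed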
+/** Entry point to application.
+ * @param argc number of command line arguments
+ * @param argv array of command line arguments
+ * @return 0, should never return
+ */
+int appl_main(int argc, char *argv[])
+{
+    parse_args(argc, argv);
+    // GcPacketPrinter packet_printer(&can_hub0, timestamped);
+    GcPacketPrinter *packet_printer = NULL;
+    if (printpackets)
+    {
+        create_legacy_bridge();
+        packet_printer = new GcPacketPrinter(&can_hub0, timestamped);
+    }
+    fprintf(stderr, "packet_printer points to %p\n", packet_printer);
+    create_direct_gc_tcp_hub(g_direct_hub.get(), port);
+    vector<std::unique_ptr<ConnectionClient>> connections;
+
+#ifdef HAVE_AVAHI_CLIENT
+    void mdns_client_start();
+    void mdns_publish(const char *name, uint16_t port);
+
+    if (export_mdns)
+    {
+        mdns_client_start();
+        mdns_publish(mdns_name, port);
+    }
+#endif
+
+#if defined(__linux__)
+    if (socket_can_path)
+    {
+        int s = socketcan_open(socket_can_path, 1);
+        if (s >= 0)
+        {
+            create_legacy_bridge();
+            new HubDeviceSelect<CanHubFlow>(&can_hub0, s);
+            fprintf(stderr, "Opened SocketCan %s: fd %d\n", socket_can_path, s);
+        }
+        else
+        {
+            fprintf(stderr, "Failed to open SocketCan %s.\n", socket_can_path);
+        }
+    }
+#endif
+
+    if (upstream_host)
+    {
+        connections.emplace_back(new UpstreamConnectionClient(
+            "upstream", g_direct_hub.get(), upstream_host, upstream_port));
+    }
+
+    if (device_path)
+    {
+        connections.emplace_back(new DeviceConnectionClient(
+            "device", g_direct_hub.get(), device_path));
+    }
+
+    while (1)
+    {
+        for (const auto &p : connections)
+        {
+            p->ping();
+        }
+        sleep(1);
+    }
+    return 0;
+}
diff --git a/applications/direct_hub/subdirs b/applications/direct_hub/subdirs
new file mode 100644
index 000000000..4e0254829
--- /dev/null
+++ b/applications/direct_hub/subdirs
@@ -0,0 +1,2 @@
+SUBDIRS = \
+
diff --git a/applications/direct_hub/targets/Makefile b/applications/direct_hub/targets/Makefile
new file mode 100644
index 000000000..9daf9a83a
--- /dev/null
+++ b/applications/direct_hub/targets/Makefile
@@ -0,0 +1,3 @@
+SUBDIRS = linux.x86 \
+
+include $(OPENMRNPATH)/etc/recurse.mk
diff --git a/applications/direct_hub/targets/linux.x86/.gitignore b/applications/direct_hub/targets/linux.x86/.gitignore
new file mode 100644
index 000000000..a343f9175
--- /dev/null
+++ b/applications/direct_hub/targets/linux.x86/.gitignore
@@ -0,0 +1,2 @@
+direct_hub
+*_test
diff --git a/applications/direct_hub/targets/linux.x86/AvaHiMDNS.cxx b/applications/direct_hub/targets/linux.x86/AvaHiMDNS.cxx
new file mode 120000
index 000000000..8f433bffb
--- /dev/null
+++ b/applications/direct_hub/targets/linux.x86/AvaHiMDNS.cxx
@@ -0,0 +1 @@
+../../../hub/targets/linux.x86/AvaHiMDNS.cxx
\ No newline at end of file
diff --git a/applications/direct_hub/targets/linux.x86/Makefile b/applications/direct_hub/targets/linux.x86/Makefile
new file mode 100644
index 000000000..684aeb0fc
--- /dev/null
+++ b/applications/direct_hub/targets/linux.x86/Makefile
@@ -0,0 +1,5 @@
+-include ../../config.mk
+include $(OPENMRNPATH)/etc/prog.mk
+
+SYSLIBRARIES += -lavahi-client -lavahi-common
+CXXFLAGS += -DHAVE_AVAHI_CLIENT
diff --git a/applications/direct_hub/targets/linux.x86/lib/Makefile b/applications/direct_hub/targets/linux.x86/lib/Makefile
new file mode 100644
index 000000000..a414ed98e
--- /dev/null
+++ b/applications/direct_hub/targets/linux.x86/lib/Makefile
@@ -0,0 +1 @@
+include $(OPENMRNPATH)/etc/app_target_lib.mk
diff --git a/applications/train/targets/nonos.xtensa.esp8266.bracz-deadrail-proto/main.cxx b/applications/train/targets/nonos.xtensa.esp8266.bracz-deadrail-proto/main.cxx
index 9f499188d..d94411c89 100644
---
a/applications/train/targets/nonos.xtensa.esp8266.bracz-deadrail-proto/main.cxx +++ b/applications/train/targets/nonos.xtensa.esp8266.bracz-deadrail-proto/main.cxx @@ -233,6 +233,7 @@ class ESPHuzzahTrain : public openlcb::TrainImpl void set_speed(openlcb::SpeedType speed) override { lastSpeed_ = speed; + estop = false; g_speed_controller.call_speed(speed); if (f0) { @@ -257,10 +258,16 @@ class ESPHuzzahTrain : public openlcb::TrainImpl /** Sets the train to emergency stop. */ void set_emergencystop() override { - // g_speed_controller.call_estop(); + g_speed_controller.call_estop(); lastSpeed_.set_mph(0); // keeps direction + estop = true; } + bool get_emergencystop() override + { + return estop; + } + /** Sets the value of a function. * @param address is a 24-bit address of the function to set. For legacy DCC * locomotives, see @ref TractionDefs for the address definitions (0=light, @@ -331,6 +338,7 @@ class ESPHuzzahTrain : public openlcb::TrainImpl openlcb::SpeedType lastSpeed_ = 0.0; bool f0 = false; bool f1 = false; + bool estop = false; }; const char kFdiXml[] = diff --git a/include/nmranet_config.h b/include/nmranet_config.h index 58207fa4d..d72c19955 100644 --- a/include/nmranet_config.h +++ b/include/nmranet_config.h @@ -128,6 +128,14 @@ DECLARE_CONST(gridconnect_buffer_delay_usec); * two threads per client (multi-threaded) execution model. */ DECLARE_CONST(gridconnect_tcp_use_select); +/// Maximum number of packets to parse from a single DirectHubPort before we +/// wait for data to drain from the system. +DECLARE_CONST(directhub_port_max_incoming_packets); + +/// Number of bytes that we will be reading in one go from an incoming port. We +/// will allocate at least this many bytes dedicated for each input port. +DECLARE_CONST(directhub_port_incoming_buffer_size); + /** Number of entries in the remote alias cache */ DECLARE_CONST(remote_alias_cache_size); diff --git a/src/executor/AsyncNotifiableBlock.cxx b/src/executor/AsyncNotifiableBlock.cxx new file mode 100644 index 000000000..b8282b123 --- /dev/null +++ b/src/executor/AsyncNotifiableBlock.cxx @@ -0,0 +1,68 @@ +/** \copyright + * Copyright (c) 2020, Balazs Racz + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + * \file AsyncNotifiableBlock.cxx + * + * An advanced notifiable construct that acts as a fixed pool of + * BarrierNotifiables. A stateflow can pend on acquiring one of them, use that + * barrier, with it automatically returning to the next caller when the Barrier + * goes out of counts. + * + * @author Balazs Racz + * @date 18 Feb 2020 + */ + +#ifndef _DEFAULT_SOURCE +#define _DEFAULT_SOURCE +#endif + +#include "AsyncNotifiableBlock.hxx" + +#include "os/sleep.h" + +AsyncNotifiableBlock::~AsyncNotifiableBlock() +{ + // Recollects all notifiable instances, including waiting as long as needed + // if there are some that have not finished yet. + for (unsigned i = 0; i < count_; ++i) + { + while (true) + { + QMember *m = next().item; + if (!m) + { + LOG(VERBOSE, + "shutdown async notifiable block: waiting for returns"); + microsleep(100); + } + else + { + HASSERT(initialize(m)->abort_if_almost_done()); + break; + } + } + } +} diff --git a/src/executor/AsyncNotifiableBlock.cxxtest b/src/executor/AsyncNotifiableBlock.cxxtest new file mode 100644 index 000000000..7030641ed --- /dev/null +++ b/src/executor/AsyncNotifiableBlock.cxxtest @@ -0,0 +1,110 @@ +#include "executor/AsyncNotifiableBlock.hxx" + +#include "utils/test_main.hxx" + +class AsyncNotifiableBlockTest : public ::testing::Test +{ +protected: + AsyncNotifiableBlock b_ {2}; +}; + +TEST_F(AsyncNotifiableBlockTest, create) +{ +} + +TEST_F(AsyncNotifiableBlockTest, count_request_release) +{ + EXPECT_EQ(2u, b_.pending()); + QMember *e = b_.next(0); + EXPECT_NE(nullptr, e); + EXPECT_EQ(1u, b_.pending()); + + QMember *f = b_.next(0); + EXPECT_NE(nullptr, f); + EXPECT_EQ(0u, b_.pending()); + + QMember *g = b_.next(0); + EXPECT_EQ(nullptr, g); + EXPECT_EQ(0u, b_.pending()); + + b_.initialize(e)->notify(); + EXPECT_EQ(1u, b_.pending()); + + QMember *h = b_.next(0); + EXPECT_EQ(e, h); + + EXPECT_EQ(0u, b_.pending()); + + b_.initialize(f)->notify(); + b_.initialize(h)->notify(); +} + +TEST_F(AsyncNotifiableBlockTest, barrier_semantics) +{ + EXPECT_EQ(2u, b_.pending()); + QMember *e = b_.next(0); + BarrierNotifiable *bn = b_.initialize(e); + EXPECT_EQ(1u, b_.pending()); + + bn->new_child(); + bn->notify(); + EXPECT_EQ(1u, b_.pending()); + bn->notify(); + EXPECT_EQ(2u, b_.pending()); +} + +class FakeExecutable : public Executable +{ +public: + void run() override + { + DIE("unexpected."); + } + + void alloc_result(QMember *m) override + { + ASSERT_TRUE(m); + m_ = m; + } + + QMember *m_ {nullptr}; +}; + +TEST_F(AsyncNotifiableBlockTest, async_allocation) +{ + EXPECT_EQ(2u, b_.pending()); + QMember *e = b_.next(0); + EXPECT_NE(nullptr, e); + EXPECT_EQ(1u, b_.pending()); + + FakeExecutable cli1, cli2, cli3; + EXPECT_EQ(nullptr, cli1.m_); + EXPECT_EQ(nullptr, cli2.m_); + EXPECT_EQ(nullptr, cli3.m_); + + b_.next_async(&cli1); + EXPECT_EQ(0u, b_.pending()); + EXPECT_NE(nullptr, cli1.m_); + EXPECT_NE(e, cli1.m_); + + b_.next_async(&cli2); + b_.next_async(&cli3); + EXPECT_EQ(nullptr, cli2.m_); + EXPECT_EQ(nullptr, cli3.m_); + EXPECT_EQ(0u, b_.pending()); + + b_.initialize(e)->notify(); // will be handed out to cli2 + + EXPECT_EQ(0u, b_.pending()); + EXPECT_EQ(e, cli2.m_); + + b_.initialize(cli1.m_)->notify(); // will be handed out to cli3 + EXPECT_EQ(cli1.m_, cli3.m_); + EXPECT_EQ(0u, b_.pending()); + + b_.initialize(cli3.m_)->notify(); // will be handed back + EXPECT_EQ(1u, b_.pending()); + + b_.initialize(cli2.m_)->notify(); // will be handed back + EXPECT_EQ(2u, b_.pending()); +} diff --git a/src/executor/AsyncNotifiableBlock.hxx 
b/src/executor/AsyncNotifiableBlock.hxx
new file mode 100644
index 000000000..abff06829
--- /dev/null
+++ b/src/executor/AsyncNotifiableBlock.hxx
@@ -0,0 +1,139 @@
+/** \copyright
+ * Copyright (c) 2013, Balazs Racz
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * \file AsyncNotifiableBlock.hxx
+ *
+ * An advanced notifiable construct that acts as a fixed pool of
+ * BarrierNotifiables. A stateflow can pend on acquiring one of them, use that
+ * barrier, with it automatically returning to the next caller when the Barrier
+ * goes out of counts.
+ *
+ * @author Balazs Racz
+ * @date 18 Feb 2020
+ */
+
+#ifndef _EXECUTOR_ASYNCNOTIFIABLEBLOCK_HXX_
+#define _EXECUTOR_ASYNCNOTIFIABLEBLOCK_HXX_
+
+#include <memory>
+
+#include "executor/Notifiable.hxx"
+#include "utils/Queue.hxx"
+#include "utils/logging.h"
+
+#include "utils/Buffer.hxx"
+
+/// A block of BarrierNotifiable objects, with an asynchronous allocation
+/// call. Caller StateFlows can block on allocating a new entry, and then get
+/// back a fresh BarrierNotifiable, which, upon being released will
+/// automatically be reallocated to a waiting flow, if any.
+class AsyncNotifiableBlock : private Notifiable, public QAsync
+{
+private:
+    /// Notifiable class that can act as a BarrierNotifiable but also be
+    /// enlisted in a queue.
+    class QueuedBarrier : public BarrierNotifiable, public QMember
+    {
+    public:
+        /// Notification implementation.
+        ///
+        /// Theory of operation: If this was the last notification (count goes
+        /// from 1 to 0), we take the done_ pointer, cast it to the owning
+        /// AsyncNotifiableBlock, and release ourselves into the queue
+        /// there. We keep the count at 1 at all times, which ensures that the
+        /// done_ pointer remains pointing to the owner AsyncNotifiableBlock.
+        void notify() override
+        {
+            AtomicHolder h(this);
+            if (count_ == 1)
+            {
+                LOG(VERBOSE, "block notifiable %p returned pool size %u",
+                    (BarrierNotifiable *)this,
+                    (unsigned)mainBufferPool->total_size());
+                auto *tgt = static_cast<AsyncNotifiableBlock *>(done_);
+                tgt->insert(this);
+            }
+            else
+            {
+                --count_;
+            }
+        }
+
+        /// Checks that there is exactly one count in here.
+        void check_one_count()
+        {
+            HASSERT(count_ == 1);
+        }
+    };
+
+public:
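    // Editorial usage sketch (not part of the patch); it mirrors the
    // AsyncNotifiableBlock.cxxtest cases above. An entry is allocated from
    // the QAsync base, turned into a barrier, and returns to the pool when
    // the last count is notified:
    //
    //   AsyncNotifiableBlock block {2};               // two parallel barriers
    //   QMember *e = block.next(0);                   // or block.next_async(flow)
    //   BarrierNotifiable *bn = block.initialize(e);  // exactly one count
    //   bn->new_child();                              // optional extra child
    //   bn->notify();                                 // child done
    //   bn->notify();                                 // last count: entry re-queued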
+    /// Constructor.
+    /// @param num_parallelism tells how many BarrierNotifiables
+    /// we should have and hand out to callers requesting them.
+    AsyncNotifiableBlock(unsigned num_parallelism)
+        : count_(num_parallelism)
+        , barriers_(new QueuedBarrier[num_parallelism])
+    {
+        for (unsigned i = 0; i < num_parallelism; ++i)
+        {
+            barriers_[i].reset(this);
+            this->insert(&barriers_[i]);
+        }
+    }
+
+    /// Destructor.
+    ~AsyncNotifiableBlock();
+
+    /// Turns an allocated entry from the QAsync into a usable
+    /// BarrierNotifiable.
+    /// @param entry a QMember that was allocated from *this.
+    /// @return an initialized BarrierNotifiable with exactly one count, and
+    /// done_ set up to be returned for further use.
+    BarrierNotifiable *initialize(QMember *entry)
+    {
+        QueuedBarrier *b = static_cast<QueuedBarrier *>(entry);
+        // We must be owning this entry.
+        HASSERT(barriers_.get() <= b);
+        HASSERT(b <= (barriers_.get() + count_));
+        b->check_one_count();
+        return b;
+    }
+
+    /// Notification implementation -- should never be called.
+    void notify() override
+    {
+        DIE("Should not receive this notification");
+    }
+
+private:
+    /// How many barriers do we have.
+    unsigned count_;
+    /// The pointer to the block of BarrierNotifiables.
+    std::unique_ptr<QueuedBarrier[]> barriers_;
+
+    DISALLOW_COPY_AND_ASSIGN(AsyncNotifiableBlock);
+};
+
+#endif // _EXECUTOR_ASYNCNOTIFIABLEBLOCK_HXX_
diff --git a/src/executor/Notifiable.hxx b/src/executor/Notifiable.hxx
index f783de862..16f6161f8 100644
--- a/src/executor/Notifiable.hxx
+++ b/src/executor/Notifiable.hxx
@@ -171,7 +171,7 @@ private:
 /// A BarrierNotifiable allows to create a number of child Notifiable and wait
 /// for all of them to finish. When the last one is finished, the parent done
 /// callback is called.
-class BarrierNotifiable : public Notifiable, private Atomic
+class BarrierNotifiable : public Notifiable, protected Atomic
 {
 public:
     /** Constructs a barrier notifiable that is done. Users should call reset()
@@ -240,7 +240,7 @@ public:
         }
     }
 
-private:
+protected:
     /// How many outstanding notifications we are still waiting for. When 0,
     /// the barrier is not live; when reaches zero, done_ will be called.
unsigned count_; diff --git a/src/executor/sources b/src/executor/sources index 640c3bcec..b70ace2ea 100644 --- a/src/executor/sources +++ b/src/executor/sources @@ -3,6 +3,7 @@ VPATH := $(SRCDIR) CSRCS += CXXSRCS += \ + AsyncNotifiableBlock.cxx \ Executor.cxx \ Notifiable.cxx \ Service.cxx \ diff --git a/src/freertos_drivers/net_cc32xx/CC32xxWiFi.cxx b/src/freertos_drivers/net_cc32xx/CC32xxWiFi.cxx index 063903e30..03863388c 100644 --- a/src/freertos_drivers/net_cc32xx/CC32xxWiFi.cxx +++ b/src/freertos_drivers/net_cc32xx/CC32xxWiFi.cxx @@ -58,6 +58,9 @@ struct CC32xxWiFi::NetAppEvent : public ::SlNetAppEvent_t {}; /** CC32xx forward declaration Helper */ struct CC32xxWiFi::SockEvent : public ::SlSockEvent_t {}; +/** CC32xx forward declaration Helper */ +struct CC32xxWiFi::SockTriggerEvent : public ::SlSockTriggerEvent_t {}; + /** CC32xx forward declaration Helper */ struct CC32xxWiFi::HttpServerEvent : public ::SlNetAppHttpServerEvent_t {}; @@ -1377,6 +1380,20 @@ void CC32xxWiFi::sock_event_handler(SockEvent *event) } } +/* + * CC32xxWiFi::trigger_event_handler() + */ +void CC32xxWiFi::trigger_event_handler(SockTriggerEvent *event) +{ + if (!event) + { + return; + } + + LOG(INFO, "Socket trigger event %u %d", (unsigned)event->Event, + (unsigned)event->EventData); +} + /* * CC32xxWiFi::http_server_callback() */ @@ -1735,6 +1752,16 @@ void SimpleLinkFatalErrorEventHandler(SlDeviceFatal_t *slFatalErrorEvent) static_cast(slFatalErrorEvent)); } +/** Notifies the service about a wifi asynchronous socket event callback. This + * means that sl_Select needs to be re-run and certain sockets might need + * wakeup. + * @param event parameters from the socket. */ +void SimpleLinkSocketTriggerEventHandler(SlSockTriggerEvent_t *event) +{ + CC32xxWiFi::instance()->trigger_event_handler( + static_cast(event)); +} + extern int slcb_SetErrno(int Errno); /** Helper function called by SimpleLink driver to set OS-specific errno value. diff --git a/src/freertos_drivers/net_cc32xx/CC32xxWiFi.hxx b/src/freertos_drivers/net_cc32xx/CC32xxWiFi.hxx index 8f13a1547..a58fd2635 100644 --- a/src/freertos_drivers/net_cc32xx/CC32xxWiFi.hxx +++ b/src/freertos_drivers/net_cc32xx/CC32xxWiFi.hxx @@ -112,6 +112,9 @@ public: /** CC32xx SimpleLink forward declaration */ struct SockEvent; + /** CC32xx SimpleLink forward declaration */ + struct SockTriggerEvent; + /** CC32xx SimpleLink forward declaration */ struct HttpServerEvent; @@ -554,6 +557,12 @@ public: */ void sock_event_handler(SockEvent *event); + /** Notifies the service about a wifi asynchronous socket event + * callback. This means that sl_Select needs to be re-run and certain + * sockets might need wakeup. DO NOT use directly. + * @param event parameters from the socket. */ + void trigger_event_handler(SockTriggerEvent *event); + /** This function handles http server callback indication. This is public * only so that an extern "C" method can call it. DO NOT use directly. * @param event pointer to HTTP Server Event info diff --git a/src/openlcb/RoutableMessage.hxx b/src/openlcb/RoutableMessage.hxx new file mode 100644 index 000000000..7ec243101 --- /dev/null +++ b/src/openlcb/RoutableMessage.hxx @@ -0,0 +1,106 @@ +/** \copyright + * Copyright (c) 2018, Balazs Racz + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * \file RoutableMessage.hxx + * + * Structure that can represent a single message in different (both abstract + * and concrete) formats. + * + * @author Balazs Racz + * @date 18 Sep 2018 + */ + +#ifndef _OPENLCB_ROUTABLEMESSAGE_HXX_ +#define _OPENLCB_ROUTABLEMESSAGE_HXX_ + +// ============ WARNING ============= +// This code is not used currently. +// ============ WARNING ============= + +#include "utils/Hub.hxx" +#include "utils/SimpleQueue.hxx" + +namespace openlcb +{ + +struct RoutableMessage; + +//typedef shared_ptr + +/// ============ WARNING ============= +/// This code is not used currently. +/// ============ WARNING ============= +struct RoutableMessage +{ + /// Filled with Node ID when this is an addressed message. 0,0 for a global + /// message. + openlcb::NodeHandle dst_; + /// Parsed message format. One ref is owned by *this unless nullptr. + Buffer *genMessage_; + /// Rendered message format for TCP. One ref is owned by *this unless + /// nullptr. + Buffer *tcpMessage_; + /// Sequence of CAN frames in gridconnect (text) format that represent this + /// message. One ref for each buffer is owned by this. + TypedQueue> gcMessages_; + /// Sequence of CAN frames binary format that represent this message. One + /// ref for each buffer is owned by this. + TypedQueue> canMessages_; + /// Represent the entry port of the message. Used for filtering global + /// messages. 
+    void *skipMember_;
+    RoutableMessage()
+        : genMessage_ {nullptr}
+        , tcpMessage_ {nullptr}
+    {
+    }
+
+    ~RoutableMessage()
+    {
+        if (genMessage_)
+        {
+            genMessage_->unref();
+        }
+        if (tcpMessage_)
+        {
+            tcpMessage_->unref();
+        }
+        while (!gcMessages_.empty())
+        {
+            auto *f = gcMessages_.pop_front();
+            f->unref();
+        }
+        while (!canMessages_.empty())
+        {
+            auto *f = canMessages_.pop_front();
+            f->unref();
+        }
+    }
+};
+
+} // namespace openlcb
+
+#endif // _OPENLCB_ROUTABLEMESSAGE_HXX_
diff --git a/src/utils/Buffer.hxx b/src/utils/Buffer.hxx
index 766479844..cd49e2b93 100644
--- a/src/utils/Buffer.hxx
+++ b/src/utils/Buffer.hxx
@@ -183,6 +183,9 @@ protected:
     /** Allow LimitedPool access to our fields */
     friend class LimitedPool;
 
+    /** Allow DataBufferPool access to our fields */
+    friend class DataBufferPool;
+
     DISALLOW_COPY_AND_ASSIGN(BufferBase);
 };
 
@@ -220,7 +223,6 @@ private:
      */
     Buffer(Pool *pool)
         : BufferBase(sizeof(Buffer<T>), pool)
-        , data_()
    {
    }
 
@@ -234,6 +236,8 @@ private:
     /** Allow Pool access to our constructor */
     friend class Pool;
+    /** Allow DataBuffer access to our constructor */
+    friend class DataBuffer;
 
     /** user data */
     T data_;
@@ -377,6 +381,8 @@ private:
     friend class BufferBase;
     /** LimitedPool proxies to a base Pool. */
     friend class LimitedPool;
+    /** DataBufferPool proxies to a base Pool. */
+    friend class DataBufferPool;
     /** Allow Buffer to access this class */
     template <class T> friend class Buffer;
@@ -668,7 +674,6 @@ private:
  */
 template <class T> void Buffer<T>::unref()
 {
-    HASSERT(sizeof(Buffer<T>) <= size_);
     if (count_.fetch_sub(1) == 1u)
     {
         this->~Buffer();
diff --git a/src/utils/ClientConnection.cxx b/src/utils/ClientConnection.cxx
index 6d2c6ac9c..de2a0142b 100644
--- a/src/utils/ClientConnection.cxx
+++ b/src/utils/ClientConnection.cxx
@@ -49,5 +49,14 @@ void GCFdConnectionClient::connection_complete(int fd)
     FdUtils::optimize_fd(fd);
     fd_ = fd;
-    create_gc_port_for_can_hub(hub_, fd, &closedNotify_, use_select);
+
+    if (hub_) {
+        create_gc_port_for_can_hub(hub_, fd, &closedNotify_, use_select);
+    } else if (directHub_) {
+        create_port_for_fd(directHub_, fd,
+            std::unique_ptr<MessageSegmenter>(create_gc_message_segmenter()),
+            &closedNotify_);
+    } else {
+        DIE("Neither hub, nor directhub given to connection client");
+    }
 }
diff --git a/src/utils/ClientConnection.hxx b/src/utils/ClientConnection.hxx
index 553e7fa72..a26fa6b98 100644
--- a/src/utils/ClientConnection.hxx
+++ b/src/utils/ClientConnection.hxx
@@ -40,6 +40,7 @@
 #include
 
 #include "utils/GridConnectHub.hxx"
+#include "utils/DirectHub.hxx"
 #include "utils/socket_listener.hxx"
 #include "utils/FdUtils.hxx"
@@ -103,6 +104,16 @@ public:
     {
     }
 
+    /// Constructor.
+    ///
+    /// @param name user-readable name for this port.
+    /// @param direct_hub gridconnect DirectHub to connect this device to.
+    GCFdConnectionClient(const string &name, ByteDirectHubInterface *direct_hub)
+        : closedNotify_(&fd_, name)
+        , directHub_(direct_hub)
+    {
+    }
+
     virtual ~GCFdConnectionClient()
     {
     }
@@ -136,10 +147,12 @@ private:
     /// Will be called when the descriptor experiences an error (typically
     /// upon device closed or connection lost).
     DeviceClosedNotify closedNotify_;
-    /// Filedes of the currently open device/socket.
+    /// File descriptor of the currently open device/socket.
     int fd_{-1};
     /// CAN hub to read-write data to.
-    CanHubFlow *hub_;
+    CanHubFlow *hub_{nullptr};
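    // Editorial note (not part of the patch): exactly one of the hub_ member
    // above and the directHub_ member below is set, depending on which
    // constructor was used; connection_complete() in ClientConnection.cxx
    // dispatches on that. A caller picks the backend at construction time,
    // e.g. (the device path is a placeholder):
    //
    //   DeviceConnectionClient legacy("device", &can_hub0, "/dev/ttyUSB0");
    //   DeviceConnectionClient direct("device", g_direct_hub.get(), "/dev/ttyUSB0");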
+    /// DirectHub to read/write data to.
+    ByteDirectHubInterface *directHub_{nullptr};
 };
 
 /// Connection client that opens a character device (such as an usb-serial) and
@@ -151,10 +164,11 @@ public:
     /// Constructor.
     ///
     /// @param name user-readable name of this device
-    /// @param hub CAN packet hub to connect this device to
+    /// @param hub CAN packet hub or GC DirectHub to connect this device to
     /// @param dev filename of the device node to open
+    template <class HubType>
     DeviceConnectionClient(
-        const string &name, CanHubFlow *hub, const string &dev)
+        const string &name, HubType *hub, const string &dev)
         : GCFdConnectionClient(name, hub)
         , dev_(dev)
     {
@@ -193,11 +207,12 @@ public:
     /// Constructor.
     ///
     /// @param name user-readable name that will be printed upon an error.
-    /// @param hub CAN hub to connect device to
+    /// @param hub CAN packet hub or GC DirectHub to connect this device to
     /// @param host where to connect to
     /// @param port where to connect to
+    template <class HubType>
     UpstreamConnectionClient(
-        const string &name, CanHubFlow *hub, const string &host, int port)
+        const string &name, HubType *hub, const string &host, int port)
         : GCFdConnectionClient(name, hub)
         , host_(host)
         , port_(port)
diff --git a/src/utils/DataBuffer.cxxtest b/src/utils/DataBuffer.cxxtest
new file mode 100644
index 000000000..ab52ca84b
--- /dev/null
+++ b/src/utils/DataBuffer.cxxtest
@@ -0,0 +1,392 @@
+#include "utils/DataBuffer.hxx"
+
+#include "utils/test_main.hxx"
+
+DataBufferPool g_pool(64);
+
+class DataBufferTest : public ::testing::Test
+{
+protected:
+    DataBufferTest()
+    {
+        g_pool.alloc(&b_);
+        lastFree_ = g_pool.free_items();
+    }
+
+    /// Adds an expectation that exactly count items have been freed.
+    void expect_freed(unsigned count = 1)
+    {
+        EXPECT_EQ(lastFree_ + count, g_pool.free_items());
+    }
+
+    std::string flatten(const LinkedDataBufferPtr &p)
+    {
+        std::string ret;
+        p.append_to(&ret);
+        return ret;
+    }
+
+    DataBuffer *b_;
+    unsigned lastFree_;
+
+    BarrierNotifiable bn_;
+    BarrierNotifiable bn2_;
+    LinkedDataBufferPtr lnk_;
+};
+
+TEST_F(DataBufferTest, alloc_free)
+{
+    EXPECT_EQ(64u, b_->size());
+    b_->unref();
+    expect_freed();
+}
+
+TEST_F(DataBufferTest, alloc_resize_free)
+{
+    EXPECT_EQ(64u, b_->size());
+    b_->set_size(3);
+    b_->unref();
+    expect_freed();
+}
+
+TEST_F(DataBufferTest, BufferPtr_free)
+{
+    b_->set_size(2);
+    {
+        auto r = get_buffer_deleter(b_);
+    }
+    expect_freed();
+    DataBuffer *bb;
+    g_pool.alloc(&bb);
+    EXPECT_EQ(bb, b_);
+}
+
+TEST_F(DataBufferTest, data_content)
+{
+    memcpy(b_->data(), "abcd", 4);
+    b_->set_size(4);
+    EXPECT_EQ('b', b_->data()[1]);
+
+    uint8_t *p;
+    unsigned avail;
+    b_->get_read_pointer(2, &p, &avail);
+    EXPECT_EQ('c', *p);
+    EXPECT_EQ(2u, avail);
+}
+
+TEST_F(DataBufferTest, ref_single)
+{
+    EXPECT_EQ(b_, b_->ref_all(13));
+    EXPECT_EQ(2u, b_->references());
+    expect_freed(0);
+    {
+        auto r = get_buffer_deleter(b_);
+    }
+    EXPECT_EQ(1u, b_->references());
+    expect_freed(0);
+    b_->unref();
+    expect_freed(1);
+}
+
+TEST_F(DataBufferTest, ref_unref_linked)
+{
+    DataBuffer *c;
+    g_pool.alloc(&c);
+    b_->set_next(c);
+    b_->set_size(4);
+    memcpy(b_->data(), "abcd", 4);
+    memcpy(c->data(), "efghij", 6);
+    c->set_size(6);
+
+    EXPECT_EQ(1u, b_->references());
+    EXPECT_EQ(1u, c->references());
+
+    b_->ref_all(8);
+    EXPECT_EQ(2u, b_->references());
+    EXPECT_EQ(2u, c->references());
+
+    expect_freed(0);
+    {
+        auto r = get_buffer_deleter(b_);
+    }
+    expect_freed(0);
+    EXPECT_EQ(1u, b_->references());
+    EXPECT_EQ(2u, c->references());
+
+    b_->unref_all(8);
+    expect_freed(1);
+    EXPECT_EQ(1u, c->references());
+
+
c->unref_all(3); + expect_freed(2); +} + +TEST_F(DataBufferTest, readptr_linked) +{ + DataBuffer *c; + g_pool.alloc(&c); + b_->set_next(c); + b_->set_size(4); + memcpy(b_->data(), "abcd", 4); + memcpy(c->data(), "efghij", 6); + c->set_size(6); + + uint8_t *p; + unsigned avail; + b_->get_read_pointer(6, &p, &avail); + EXPECT_EQ('g', *p); + EXPECT_EQ(4u, avail); +} + +TEST_F(DataBufferTest, lnkbuf) +{ + // Initial state + EXPECT_EQ(64u, b_->size()); + EXPECT_EQ(0u, lnk_.free()); + + b_->set_done(bn_.reset(EmptyNotifiable::DefaultInstance())); + + // Adding some place to write to + lnk_.append_empty_buffer(b_); + EXPECT_EQ(64u, lnk_.free()); + EXPECT_EQ(0u, lnk_.size()); + EXPECT_EQ(0u, lnk_.head()->size()); + + EXPECT_EQ(lnk_.data_write_pointer(), b_->data()); + + // Adds some data. + memcpy(lnk_.data_write_pointer(), "abcd", 4); + lnk_.data_write_advance(4); + + EXPECT_EQ(4u, lnk_.size()); + EXPECT_EQ(4u, lnk_.head()->size()); + + // Appends some data. + memcpy(lnk_.data_write_pointer(), "efg", 3); + lnk_.data_write_advance(3); + + EXPECT_EQ(7u, lnk_.size()); + EXPECT_EQ(7u, lnk_.head()->size()); + EXPECT_EQ(64u - 7, lnk_.free()); + + // Test flatten. + string flat = "X"; + lnk_.append_to(&flat); + EXPECT_EQ("Xabcdefg", flat); + + // Export head. + LinkedDataBufferPtr exp1 = lnk_.transfer_head(5); + EXPECT_EQ(5u, exp1.size()); + EXPECT_EQ(0u, exp1.free()); + EXPECT_EQ(64u - 7, lnk_.free()); + EXPECT_EQ(2u, lnk_.size()); // now shorter + EXPECT_EQ(5u, lnk_.skip()); + // The head buffer still has all those bytes. + EXPECT_EQ(7u, lnk_.head()->size()); + + // Test flatten both cases. + flat = "Y"; + lnk_.append_to(&flat); + EXPECT_EQ("Yfg", flat); + + flat = "Z"; + exp1.append_to(&flat); + EXPECT_EQ("Zabcde", flat); + + // Move constructor test + LinkedDataBufferPtr exp2 = std::move(exp1); + EXPECT_EQ(0u, exp1.size()); + flat = "W"; + exp1.append_to(&flat); + EXPECT_EQ("W", flat); + flat = "P"; + exp2.append_to(&flat); + EXPECT_EQ("Pabcde", flat); + + // Copy test + exp1.reset(exp2); + flat = "W"; + exp1.append_to(&flat); + EXPECT_EQ("Wabcde", flat); + flat = "P"; + exp2.append_to(&flat); + EXPECT_EQ("Pabcde", flat); + + // Release: all three need to be reset for the buffer to be released. + expect_freed(0); + lnk_.reset(); + expect_freed(0); + exp1.reset(); + expect_freed(0); + EXPECT_FALSE(bn_.is_done()); + exp2.reset(); + expect_freed(1); + EXPECT_TRUE(bn_.is_done()); +} + +TEST_F(DataBufferTest, lnk_multi) +{ + b_->set_done(bn_.reset(EmptyNotifiable::DefaultInstance())); + // Adding two buffers with bytes on the tail. + lnk_.append_empty_buffer(b_); + EXPECT_EQ(64u, lnk_.free()); + memcpy(lnk_.data_write_pointer(), "abcd", 4); + lnk_.data_write_advance(4); + EXPECT_EQ(60u, lnk_.free()); + + g_pool.alloc(&b_); + b_->set_done(bn2_.reset(EmptyNotifiable::DefaultInstance())); + lnk_.append_empty_buffer(b_); + EXPECT_EQ(64u, lnk_.free()); + + memcpy(lnk_.data_write_pointer(), "efg", 3); + lnk_.data_write_advance(3); + EXPECT_EQ(61u, lnk_.free()); + + // Test flatten. + string flat = "X"; + lnk_.append_to(&flat); + EXPECT_EQ("Xabcdefg", flat); + EXPECT_EQ(7u, lnk_.size()); + + auto *saved_ptr = lnk_.data_write_pointer(); + auto saved_free = lnk_.free(); + + // Export head and backfill on tail. + LinkedDataBufferPtr exp1 = lnk_.transfer_head(5); + + // After transfer_head the write pointer must be the same and the free + // bytes must be the same. + EXPECT_EQ(saved_ptr, lnk_.data_write_pointer()); + EXPECT_EQ(saved_free, lnk_.free()); + EXPECT_EQ(0u, exp1.free()); // No free in the transfer. 
+ + memcpy(lnk_.data_write_pointer(), "hijk", 4); + lnk_.data_write_advance(4); + EXPECT_EQ(57u, lnk_.free()); + + // Test flattens. + flat = "Y"; + lnk_.append_to(&flat); + EXPECT_EQ("Yfghijk", flat); + EXPECT_EQ(6u, lnk_.size()); + + flat = "Z"; + exp1.append_to(&flat); + EXPECT_EQ("Zabcde", flat); + EXPECT_EQ(5u, exp1.size()); + + EXPECT_EQ(1u, lnk_.skip()); + EXPECT_EQ(b_, lnk_.head()); + + // Export more and test flattens. + LinkedDataBufferPtr exp2 = lnk_.transfer_head(2); + + EXPECT_EQ(b_, lnk_.head()); + EXPECT_EQ(3u, lnk_.skip()); + + flat = "W"; + lnk_.append_to(&flat); + EXPECT_EQ("Whijk", flat); + EXPECT_EQ(4u, lnk_.size()); + + flat = "U"; + exp2.append_to(&flat); + EXPECT_EQ("Ufg", flat); + EXPECT_EQ(2u, exp2.size()); + + // Test copy and move operations. + LinkedDataBufferPtr exp1b; + exp1b.reset(exp1); + + exp1 = std::move(exp2); + + // Test appends. + EXPECT_TRUE(exp1b.try_append_from(exp1)); + EXPECT_EQ("abcdefg", flatten(exp1b)); + EXPECT_EQ(7u, exp1b.size()); + + exp2.reset(exp1b); + + EXPECT_EQ("abcdefg", flatten(exp2)); + EXPECT_EQ(7u, exp2.size()); + + EXPECT_TRUE(exp2.try_append_from(lnk_.transfer_head(4))); + + EXPECT_EQ("abcdefghijk", flatten(exp2)); + EXPECT_EQ(11u, exp2.size()); + + // Test data_read_advance. + exp2.data_read_advance(2); + EXPECT_EQ("cdefghijk", flatten(exp2)); + EXPECT_EQ(9u, exp2.size()); + EXPECT_EQ(2u, exp2.skip()); + + // Now the following keep refs to the head buffer: + // lnk_ no; exp1 no; exp1b yes; exp2 yes. + + exp2.data_read_advance(2); + EXPECT_EQ("efghijk", flatten(exp2)); + EXPECT_EQ(7u, exp2.size()); + EXPECT_EQ(0u, exp2.skip()); + + EXPECT_FALSE(bn_.is_done()); + exp1b.data_read_advance(5); + EXPECT_TRUE(bn_.is_done()); + EXPECT_EQ("fg", flatten(exp1b)); + + EXPECT_EQ(0u, lnk_.size()); + + // Create a chain of packets. + BarrierNotifiable bn3_; + g_pool.alloc(&b_); + b_->set_done(bn3_.reset(EmptyNotifiable::DefaultInstance())); + lnk_.append_empty_buffer(b_); + memcpy(lnk_.data_write_pointer(), "l", 1); + lnk_.data_write_advance(1); + + BarrierNotifiable bn4_; + g_pool.alloc(&b_); + b_->set_done(bn4_.reset(EmptyNotifiable::DefaultInstance())); + lnk_.append_empty_buffer(b_); + memcpy(lnk_.data_write_pointer(), "m", 1); + lnk_.data_write_advance(1); + + BarrierNotifiable bn5_; + g_pool.alloc(&b_); + b_->set_done(bn5_.reset(EmptyNotifiable::DefaultInstance())); + lnk_.append_empty_buffer(b_); + memcpy(lnk_.data_write_pointer(), "n", 1); + lnk_.data_write_advance(1); + + BarrierNotifiable bn6_; + g_pool.alloc(&b_); + b_->set_done(bn6_.reset(EmptyNotifiable::DefaultInstance())); + lnk_.append_empty_buffer(b_); + memcpy(lnk_.data_write_pointer(), "opq", 3); + lnk_.data_write_advance(3); + + LinkedDataBufferPtr exp3 = lnk_.transfer_head(5); + + EXPECT_FALSE(exp1b.try_append_from(exp3)); + EXPECT_TRUE(exp2.try_append_from(exp3)); + EXPECT_EQ("efghijklmnop", flatten(exp2)); + + EXPECT_EQ("fg", flatten(exp1)); + + EXPECT_FALSE(bn2_.is_done()); + exp2.data_read_advance(7); + EXPECT_FALSE(bn2_.is_done()); + exp1b.reset(); + EXPECT_FALSE(bn2_.is_done()); + exp1.reset(); + EXPECT_FALSE(bn2_.is_done()); + exp3.reset(); + EXPECT_TRUE(bn2_.is_done()); + EXPECT_EQ("lmnop", flatten(exp2)); + + exp2.reset(); + lnk_.reset(); + // The barriers will verify upon destruction time that they were correctly + // notified. 
+} diff --git a/src/utils/DataBuffer.hxx b/src/utils/DataBuffer.hxx new file mode 100644 index 000000000..d7b940715 --- /dev/null +++ b/src/utils/DataBuffer.hxx @@ -0,0 +1,625 @@ +/** \copyright + * Copyright (c) 2020, Balazs Racz + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * \file DataBuffer.hxx + * + * Specialization of the Buffer<> class for storing untyped data payloads; + * allocation and Pool objects that match this behavior. + * + * @author Balazs Racz + * @date 17 Feb 2020 + */ + +#ifndef _UTILS_DATABUFFER_HXX_ +#define _UTILS_DATABUFFER_HXX_ + +#include "utils/Buffer.hxx" + +class DataBufferPool; + +/// Specialization of the Buffer class that is designed for storing untyped +/// data arrays. Adds the ability to treat the next pointers as links to +/// consecutive data bytes, ref'ing and unref'ing a sequence of buffers in one +/// go. +class DataBuffer : public Buffer +{ +public: + /// Overrides the size of the data buffer. The semantic meaning of the + /// size() of DataBuffer is the number of bytes that are filled in inside + /// data(). + /// @param s new value of size. + void set_size(uint16_t s) + { + size_ = s; + } + + /// Sets the linking pointer of the DataBuffer to a target buffer. + /// @param n next buffer to link to. + void set_next(DataBuffer *n) + { + QMember::next = n; + } + + /// @return the pointer to the next chunk of buffer. + DataBuffer *next() + { + return static_cast(QMember::next); + } + + /// @return the payload pointer, cast to a convenient type. + uint8_t *data() + { + return static_cast(&data_[0]); + } + + /// @return a new reference to *this. + DataBuffer *ref() + { + return static_cast(Buffer::ref()); + } + + /// Acquires one reference to all blocks of this buffer. + /// @param total_size the number of bytes starting from the beginning of + /// *this. + /// @param tail if non-null, will save the pointer to the tail buffer there. + /// @param tail_size if non-null, will save the number of bytes that are in + /// the tail buffer. + /// @return the first of the referenced buffers. 
+ DataBuffer *ref_all(unsigned total_size, DataBuffer **tail = nullptr, + unsigned *tail_size = nullptr) + { + DataBuffer *curr = this; + do + { + HASSERT(curr); + curr->ref(); + if (total_size > curr->size()) + { + total_size -= curr->size(); + } + else + { + if (tail) + { + *tail = curr; + } + if (tail_size) + { + *tail_size = total_size; + } + total_size = 0; + } + curr = curr->next(); + } while (total_size > 0); + return this; + } + + /// Releases one reference to all blocks of this buffer. This includes one + /// reference to the last block which may be a partially filled buffer. + /// @param total_size the number of bytes starting from the beginning of + /// *this. + void unref_all(unsigned total_size) + { + DataBuffer *curr = this; + while (true) + { + HASSERT(curr); + if (total_size > curr->size()) + { + DataBuffer *next = curr->next(); + total_size -= curr->size(); + curr->unref(); + curr = next; + } + else + { + curr->unref(); + break; + } + } + } + + /// Helper function to read out data from a linked data buffer. + /// @param skip this is how many bytes from the beginning of the buffer + /// will be skipped. + /// @param ptr will be set to the pointer to the first non-skipped byte. + /// @param available will contain the number of available consecutive bytes + /// to read from that point on. + /// @return the data buffer where to continue reading (with skip = 0), + /// might be nullptr. + DataBuffer *get_read_pointer( + unsigned skip, uint8_t **ptr, unsigned *available) + { + DataBuffer *curr = this; + while (curr->size() <= skip) + { + skip -= curr->size(); + curr = curr->next(); + HASSERT(curr); + } + *ptr = curr->data() + skip; + *available = curr->size() - skip; + return curr->next(); + } + +private: + friend class DataBufferPool; + + DataBuffer(DataBufferPool *p) + : Buffer((Pool *)p) + { + } +}; // class DataBuffer + +using DataBufferPtr = std::unique_ptr>; + +/// A class that keeps ownership of a chain of linked DataBuffer references. +class LinkedDataBufferPtr +{ +public: + LinkedDataBufferPtr() + { + } + + ~LinkedDataBufferPtr() + { + reset(); + } + + /// Move constructor. Takes the ownership that o has. Leaves o as empty. + LinkedDataBufferPtr(LinkedDataBufferPtr &&o) + : head_(o.head_) + , tail_(o.tail_) + , size_(o.size_) + , skip_(o.skip_) + , free_(o.free_) + { + o.clear(); + } + + /// Move assignment operator. Takes the ownership that o has. Leaves o as + /// empty. + void operator=(LinkedDataBufferPtr &&o) + { + reset(); + head_ = o.head_; + tail_ = o.tail_; + size_ = o.size_; + skip_ = o.skip_; + free_ = o.free_; + o.clear(); + } + + /// We do not permit default copy operation. Use the reset() function for + /// that. + LinkedDataBufferPtr(const LinkedDataBufferPtr &) = delete; + void operator=(const LinkedDataBufferPtr &) = delete; + + /// Takes a reference of o, taking a prefix of len size (or all the + /// data). The current buffer becomes non-extensible. + /// @param o an owned LinkedDataBufferPtr + /// @param size is non-negative, this is how many bytes from the beginning + /// of o will be copied. If default (negative), takes all bytes that are + /// filled. + void reset(const LinkedDataBufferPtr &o, ssize_t size = -1) + { + reset(); + if (size < 0) + { + size = o.size_; + } + skip_ = o.skip_; + size_ = size; + // Takes references, keeping the tail and tail size. 
+ unsigned tail_size; + head_ = o.head_->ref_all(o.skip_ + size, &tail_, &tail_size); + HASSERT(tail_size > 0); + free_ = -tail_size; + } + + /// Clears the current contents and replaces it with the empty buf. + /// @param buf is a new, empty DataBuffer. Ownership will be taken. The + /// size() value of it has to be denoting the amount of available bytes. + void reset(DataBuffer *buf) + { + reset(); + head_ = tail_ = buf; + skip_ = 0; + free_ = buf->size(); + size_ = 0; + buf->set_size(0); + } + + /// Set to a single data buffer. + /// @param buf is a filled-in data buffer. Takes ownership. Must be a + /// single (non-chained) buffer. + /// @param skip how many bytes to skip at the beginning + /// @param size how many bytes to take after skip bytes. + void reset(DataBuffer *buf, unsigned skip, unsigned size) + { + reset(); + head_ = buf; + skip_ = skip; + size_ = size; + free_ = -int(skip + size); + tail_ = buf; + } + + /// Adds an empty buffer to the end of this buffer chain. + /// @param buf is a new, empty DataBuffer. Ownership will be taken. The + /// size() value of it has to be denoting the amount of available bytes. + void append_empty_buffer(DataBuffer *buf) + { + if (!head_) + { + reset(buf); + return; + } + HASSERT(free_ >= 0); + HASSERT(tail_); + // Note: if free_ was > 0, there were some unused bytes in the tail + // buffer. However, as part of the append operation, we lose these + // bytes as capacity. The new free part will be only in the newly + // appended tail_ buffer. This is because free_ can never span more + // than one buffer. + free_ = buf->size(); + buf->set_size(0); + HASSERT(!tail_->next()); + tail_->set_next(buf); + tail_ = buf; + } + + /// Deallocates the current content (by releasing the references). + void reset() + { + if (head_) + { + head_->unref_all(size_ + skip_); + } + clear(); + } + + /// @return the pointer where data can be appended into the tail of this + /// buffer chain. Use free() to know how many bytes can be written here. + uint8_t *data_write_pointer() + { + if (!tail_) + { + return nullptr; + } + return tail_->data() + tail_->size(); + } + + /// Advances the tail pointer after a write occurred into the tail. + /// @param len how many bytes were written into the space pointed to by + /// data_write_pointer(). + void data_write_advance(size_t len) + { + HASSERT(free_ >= 0 && ((int)len <= free_)); + free_ -= len; + tail_->set_size(tail_->size() + len); + size_ += len; + } + + /// Advances the head pointer. Typically used after a successful read + /// happened. + /// @param len how many bytes to advance the read pointer. + void data_read_advance(size_t len) + { + HASSERT(len <= size()); + while (len > 0) + { + uint8_t *p; + unsigned available; + DataBuffer *next_head = + head_->get_read_pointer(skip_, &p, &available); + if ((len > available) || (len == available && len < size_)) + { + head_->unref(); + head_ = next_head; + skip_ = 0; + size_ -= available; + len -= available; + } + else + { + skip_ += len; + size_ -= len; + len = 0; + break; + } + } + } + + /// @return buffer that is at head. + DataBuffer *head() const + { + return head_; + } + + /// @return buffer that is at the tail. + DataBuffer *tail() const + { + return tail_; + } + + /// @return how many bytes to skip from the head buffer. + unsigned skip() const + { + return skip_; + } + + /// @return how many bytes are filled in the current buffer. 
+ unsigned size() const + { + return size_; + } + + /// @return the number of bytes that can be written into the tail of this + /// buffer chain, at data_write_pointer(). + size_t free() const + { + if (free_ < 0) + { + return 0; + } + return free_; + } + + /// Transfers the ownership of the prefix of this buffer. The tail will + /// remain in the current buffer chain as an extra reference. Any free + /// space in the tail will also remain in the current bufferptr. + /// @param len how many bytes at the beginning (starting at skip_) to + /// transfer. Must reach into the tail buffer, meaning that at least one + /// byte from the tail buffer must be transferred. + /// @return a new (moveable) LinkedDataBufferPtr that will get the + /// ownership of the head. It will be non-extendible. + LinkedDataBufferPtr transfer_head(size_t len) + { + LinkedDataBufferPtr ret; + ret.head_ = head_; + ret.tail_ = tail_; + ret.skip_ = skip_; + ret.size_ = len; + + HASSERT(tail_); // always true when we have a buffer + HASSERT(len <= size_); + + size_t bytes_left = size_ - len; + + // tail_->size() is the previously used bytes in the tail buffer. The + // number of bytes not transferred shall fit into this. There must be + // however at least one byte in the tail buffer that *was* transferred. + HASSERT(bytes_left < tail_->size()); + + size_t bytes_transferred_from_tail_buffer = tail_->size() - bytes_left; + + // Since the tail is now in both the transferred chain as well as in + // the current chain, it needs an extra ref. We keep that ref. + head_ = tail_->ref(); + size_ = bytes_left; + skip_ = bytes_transferred_from_tail_buffer; + HASSERT(skip_ > 0); + // Saves the end offset of the tail buffer in ret. + ret.free_ = -bytes_transferred_from_tail_buffer; + // this->free_ remains as is. + return ret; + } + + /// Appends all content in this buffer to an std::string. + /// @param recvd string to append data to. + void append_to(std::string *recvd) const + { + DataBuffer *head = head_; + unsigned skip = skip_; + size_t len = size_; + recvd->reserve(recvd->size() + len); + while (len > 0) + { + uint8_t *ptr; + unsigned available; + head = head->get_read_pointer(skip, &ptr, &available); + if (available > len) + { + available = len; + } + recvd->append((char *)ptr, available); + len -= available; + skip = 0; + } + } + + /// Attempt to combine *this with o into a single LinkedDataBufferPtr + /// this. This tries to do `*this += o`. It will succeed if o.head() == + /// this->tail() and the bytes in these buffers are back to back. + /// @param o a LinkedDataBuffer with data payload. + /// @return true if append succeeded. If false, nothing was changed. + bool try_append_from(const LinkedDataBufferPtr &o) + { + if (!o.size()) + { + return true; // zero bytes, nothing to do. + } + if (free_ >= 0) + { + // writeable buffer, cannot append. + return false; + } + HASSERT(o.head()); + if (o.head() != tail_) + { + // Buffer does not start in the same chain where we end. + return false; + } + if (-free_ != (int)o.skip()) + { + // Not back-to-back. + return false; + } + // Now we're good, so take over the extra buffers. + tail_ = o.tail_; + free_ = o.free_; + size_ += o.size_; + // Acquire extra references + o.head_->ref_all(o.skip() + o.size()); + // Release duplicate reference between the two chains. + o.head_->unref(); + return true; + } + +private: + /// Internal helper function of constructors and reset functions. Clears + /// the current structure (references have to have been dealt with + /// before). 
    void clear()
+    {
+        head_ = tail_ = nullptr;
+        skip_ = free_ = size_ = 0;
+    }
+
+    /// First buffer in the chain. This is the root of the ownership.
+    DataBuffer *head_ {nullptr};
+    /// Last buffer in the chain. This is where we can extend the owned bytes.
+    DataBuffer *tail_ {nullptr};
+    /// How many bytes we have filled in (counting starts at head.data() +
+    /// skip_).
+    size_t size_ {0};
+    /// How many bytes to skip in the head buffer.
+    uint16_t skip_ {0};
+    /// If >= 0: How many free bytes are there in the tail buffer. If < 0:
+    /// non-appendable buffer, the -offset of the first byte in the tail buffer
+    /// that's after the payload. In other words, -1 * the skip() of the next
+    /// linked buffer. In other words, -1 * the end pointer in the tail buffer.
+    int16_t free_ {0};
+};
+
+/// Proxy Pool that can allocate DataBuffer objects of a certain size. All
+/// memory comes from the mainBufferPool.
+class DataBufferPool : public Pool
+{
+public:
+    DataBufferPool(unsigned payload_size)
+        : payloadSize_(payload_size)
+    {
+        HASSERT(payload_size <= 65535u - sizeof(BufferBase));
+    }
+
+#ifdef GTEST
+    /// Use this variable with a ScopedOverride to temporarily change how much
+    /// data gets allocated.
+    uint16_t *payload_size_override()
+    {
+        return &payloadSize_;
+    }
+#endif
+
+    /// Number of free items in the pool.
+    size_t free_items() override
+    {
+        return base_pool()->free_items(alloc_size());
+    }
+
+    /// Number of free items in the pool for a given allocation size.
+    /// @param size size of interest
+    /// @return number of free items in the pool for a given allocation size
+    size_t free_items(size_t size) override
+    {
+        return base_pool()->free_items(size);
+    }
+
+    /** Get a free item out of the pool with untyped data of the size specified
+     * in the constructor.
+     * @param result pointer to a pointer to the result
+     */
+    void alloc(DataBuffer **result)
+    {
+#ifdef DEBUG_BUFFER_MEMORY
+        g_current_alloc = &&alloc;
+    alloc:
+#endif
+        *result = static_cast<DataBuffer *>(
+            base_pool()->alloc_untyped(alloc_size(), nullptr));
+        if (*result)
+        {
+            new (*result) DataBuffer(this);
+            (*result)->size_ = payload_size();
+        }
+    }
+
+    /** Get a free item out of the pool with untyped data of the size specified
+     * in the constructor.
+     * @param result holder pointer
+     */
+    void alloc(DataBufferPtr *result)
+    {
+        DataBuffer *b;
+        alloc(&b);
+        result->reset(b);
+    }
+
+private:
+    /// Internal helper function used by the default Buffer alloc
+    /// implementation.
+    BufferBase *alloc_untyped(size_t size, Executable *flow) override
+    {
+        DIE("DataBufferPool does not support this type of allocation.");
+    }
+
+    /// Function called when a buffer refcount reaches zero.
+    void free(BufferBase *item) override
+    {
+        // Restores the correct size for assigning it to the right freelist
+        // bucket.
+        item->size_ = alloc_size();
+        // Clears the next pointer as we are not using these for queues.
+        item->next = nullptr;
+        base_pool()->free(item);
+    }
+
+    /// @return the pool from which we should get the actual memory we have.
+    Pool *base_pool()
+    {
+        return mainBufferPool;
+    }
+
+    /// @return size of the buffers to allocate.
+    uint16_t alloc_size()
+    {
+        return sizeof(BufferBase) + payloadSize_;
+    }
+
+    /// @return maximum number of bytes that can be stored inside an allocated
+    /// buffer.
+    uint16_t payload_size()
+    {
+        return payloadSize_;
+    }
+
+    /// Number of bytes that need to be stored in each buffer.
+ uint16_t payloadSize_; +}; + +#endif // _UTILS_DATABUFFER_HXX_ diff --git a/src/utils/DirectHub.cxx b/src/utils/DirectHub.cxx new file mode 100644 index 000000000..af1329b9f --- /dev/null +++ b/src/utils/DirectHub.cxx @@ -0,0 +1,948 @@ +/** \copyright + * Copyright (c) 2020, Balazs Racz + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * \file DirectHub.cxx + * + * Optimized class for ingress and egress of write-once objects, aimed to + * support multi-recipient messages with fast internal fan-out but low compute + * overhead. + * + * @author Balazs Racz + * @date 9 Feb 2020 + */ + +// #define LOGLEVEL VERBOSE + +#include "utils/DirectHub.hxx" + +#include +#include +#include +#include +#include + +#include "executor/AsyncNotifiableBlock.hxx" +#include "executor/StateFlow.hxx" +#include "nmranet_config.h" +#include "utils/logging.h" +#include "utils/socket_listener.hxx" + +/// This object forwards allocations to mainBufferPool. The blocks allocated +/// here are all the same size. They are used to read bytes from a tcp socket +/// into memory. +DataBufferPool g_direct_hub_data_pool( + config_directhub_port_incoming_buffer_size()); +/// This object forwards allocations to mainBufferPool. The blocks allocated +/// here are all the same size. They are used to render outgoing CAN packets +/// into gridconnect format. +DataBufferPool g_direct_hub_kbyte_pool(1024); + +/// A single service class that is shared between all interconnected DirectHub +/// instances. It is the responsibility of this Service to perform the locking +/// of the individual flows. +class DirectHubService : public Service +{ +public: + typedef Q QueueType; + + DirectHubService(ExecutorBase *e) + : Service(e) + , busy_(0) + { + } + + /// @return lock object for the busy_ flag. + Atomic *lock() + { + return pendingSend_.lock(); + } + + /// Adds a caller to the waiting list of who wants to send traffic to the + /// hub. If there is no waiting list, the caller will be executed inline. + /// @param caller represents an entry point to the hub. It is required that + /// caller finishes its run() by invoking on_done(). 
+ void enqueue_caller(Executable *caller) + { + { + AtomicHolder h(lock()); + if (busy_) + { + /// @todo there is a short period of priority inversion here, + /// because we insert an executable into a separate queue here + /// than the Executor. We dequeue the highest priority in + /// on_done(), but if that happens to be a low priority, that + /// might get stuck in the Executor for a long time, even if in + /// the meantime a higher priority message arrives here. A + /// better strategy would be to enqueue the Executable's in the + /// Service's executor directly instead of queueing them and + /// pushing them one by one to the Service's executor. In that + /// case we'd need a third state between busy and not busy: + /// whether we're queueing executable's or whether we're + /// dumping them into the executor. We would also need to keep + /// track of how many went to the Executor already, such that + /// we know when busy_ gets back to false. + pendingSend_.insert_locked(caller); + return; + } + busy_ = 1; + } + caller->run(); + } + + /// This function must be called at the end of the enqueued functions in + /// order to properly clear the busy flag or take out the next enqueued + /// executable. + void on_done() + { + // De-queues the next entry. + Result deq; + { + AtomicHolder h(lock()); + if (pendingSend_.empty()) + { + busy_ = 0; + return; + } + deq = pendingSend_.next(); + } + // Schedules it on the executor. + executor()->add(static_cast(deq.item), deq.index); + } + + /// 1 if there is any message being processed right now. + unsigned busy_ : 1; + /// List of callers that are waiting for the busy_ lock. + QueueType pendingSend_; +}; + +template +class DirectHubImpl : public DirectHubInterface, + protected StateFlowBase, + private Atomic +{ +public: + DirectHubImpl(DirectHubService *service) + : StateFlowBase(service) + { + } + + ~DirectHubImpl() + { + delete service(); + } + + Service *get_service() override + { + return service(); + } + + void register_port(DirectHubPort *port) override + { + AtomicHolder h(this); + ports_.push_back(port); + } + + /// Synchronously unregisters a port. + void unregister_port(DirectHubPort *port) override + { + SyncNotifiable n; + unregister_port(port, &n); + n.wait_for_notification(); + } + + /// Removes a port from this hub. This port must have been registered + /// previously. + /// @param port the downstream port. + void unregister_port(DirectHubPort *port, Notifiable *done) override + { + // By enqueueing on the service we ensure that the state flow is not + // processing any packets while the code below is running. + service()->enqueue_caller(new CallbackExecutable([this, port, done]() { + { + AtomicHolder h(this); + ports_.erase(std::remove(ports_.begin(), ports_.end(), port), + ports_.end()); + } + done->notify(); + service()->on_done(); + })); + } + + void enqueue_send(Executable *caller) override + { + service()->enqueue_caller(caller); + } + + MessageAccessor *mutable_message() override + { + return &msg_; + } + + void do_send() override + { + unsigned next_port = 0; + while (true) + { + DirectHubPort *p; + { + + AtomicHolder h(this); + if (next_port >= ports_.size()) + { + break; + } + p = ports_[next_port]; + ++next_port; + } + if (should_send_to(p)) + { + p->send(&msg_); + } + } + msg_.clear(); + service()->on_done(); + } + + /// Filters a message going towards a specific output port. + /// @param p the output port + /// @return true if this message should be sent to that output port. 
+ bool should_send_to(DirectHubPort *p) + { + return static_cast(p) != msg_.source_; + } + +private: + DirectHubService *service() + { + return static_cast(StateFlowBase::service()); + } + + /// Stores the registered output ports. Protected by Atomic *this. + std::vector *> ports_; + + /// The message we are trying to send. + MessageAccessor msg_; +}; // class DirectHubImpl + +/// Temporary function to instantiate the hub. +DirectHubInterface *create_hub(ExecutorBase *e) +{ + auto *s = new DirectHubService(e); + auto *dh = new DirectHubImpl(s); + return dh; +} + +/// Connects a (bytes typed) hub to an FD. This state flow is the write flow; +/// i.e., it waits for messages coming from the hub and writes them into the fd. +/// The object is self-owning, i.e. will delete itself when the input goes dead +/// or when the port is shutdown (eventually). +class DirectHubPortSelect : public DirectHubPort, + private StateFlowBase +{ +private: + /// State flow that reads the FD and sends the read data to the direct hub. + class DirectHubReadFlow : public StateFlowBase + { + public: + DirectHubReadFlow(DirectHubPortSelect *parent, + std::unique_ptr segmenter) + : StateFlowBase(parent->service()) + , parent_(parent) + , segmenter_(std::move(segmenter)) + { + segmenter_->clear(); + } + + /// Starts the current flow. + void start() + { + start_flow(STATE(alloc_for_read)); + } + + /// Requests the read port to shut down. Must be called on the main + /// executor. Causes the flow to notify the parent via the + /// read_flow_exit() function then terminate, either inline or not. + void read_shutdown() + { + auto *e = this->service()->executor(); + if (e->is_selected(&helper_)) + { + // We're waiting in select on reads, we can cancel right now. + e->unselect(&helper_); + set_terminated(); + buf_.reset(); + /// @todo We should first clean up the async notifiable block + /// and only signal the exit afterwards. + parent_->read_flow_exit(); + } + // Else we're waiting for the regular progress to wake up the + // flow. It will check fd_ < 0 to exit. + } + + private: + /// Root of the read flow. Starts with getting the barrier notifiable, + /// either synchronously if one is available, or asynchronously. + Action alloc_for_read() + { + QMember *bn = pendingLimiterPool_.next().item; + if (bn) + { + bufferNotifiable_ = pendingLimiterPool_.initialize(bn); + return get_read_buffer(); + } + else + { + pendingLimiterPool_.next_async(this); + return wait_and_call(STATE(barrier_allocated)); + } + } + + /// Intermediate step if asynchronous allocation was necessary for the + /// read barrier. + Action barrier_allocated() + { + QMember *bn; + cast_allocation_result(&bn); + HASSERT(bn); + bufferNotifiable_ = pendingLimiterPool_.initialize(bn); + return get_read_buffer(); + } + + /// Invoked when we have a bufferNotifiable_ from the barrier pool. + Action get_read_buffer() + { + DataBuffer *p; + LOG(VERBOSE, "read flow %p (fd %d): notif %p alloc() %u", this, + parent_->fd_, (BarrierNotifiable *)bufferNotifiable_, + (unsigned)mainBufferPool->total_size()); + // Since there is a limit on how many bufferNotifiable_'s can be, + // and they are uniquely assigned to the buffers, we know that this + // synchronous allocation can only happen for a few buffers + // only. The buffers will get recycled through the main buffer pool + // exactly at the time when the bufferNotifiable_ comes back to the + // pendingLimiterPool_. 
+ g_direct_hub_data_pool.alloc(&p); + if (buf_.head()) + { + buf_.append_empty_buffer(p); + } + else + { + buf_.reset(p); + } + p->set_done(bufferNotifiable_); + bufferNotifiable_ = nullptr; + return do_some_read(); + } + + Action do_some_read() + { + if (parent_->fd_ < 0) + { + // Socket closed, terminate and exit. + set_terminated(); + buf_.reset(); + parent_->read_flow_exit(); + return wait(); + } + return read_single(&helper_, parent_->fd_, + buf_.data_write_pointer(), buf_.free(), STATE(read_done)); + } + + Action read_done() + { + if (helper_.hasError_) + { + LOG(INFO, "%p: Error reading from fd %d: (%d) %s", parent_, + parent_->fd_, errno, strerror(errno)); + set_terminated(); + buf_.reset(); + parent_->report_read_error(); + return wait(); + } + size_t bytes_arrived = buf_.free() - helper_.remaining_; + segmentSize_ = segmenter_->segment_message( + buf_.data_write_pointer(), bytes_arrived); + buf_.data_write_advance(bytes_arrived); + return eval_segment(); + } + + /// Checks the segmenter output; if it indicates a complete message, + /// clears the segmenter and sends off the message. + Action eval_segment() + { + if (segmentSize_ > 0) + { + // Complete message. + segmenter_->clear(); + return call_immediately(STATE(send_prefix)); + } + else + { + return incomplete_message(); + } + } + + /// Clears the segmenter and starts segmenting from the beginning of + /// the buf_. + Action call_head_segmenter() + { + uint8_t *ptr; + unsigned available; + auto *n = + buf_.head()->get_read_pointer(buf_.skip(), &ptr, &available); + HASSERT(!n); // We must be at the tail. + segmentSize_ = segmenter_->segment_message(ptr, available); + return eval_segment(); + } + + /// Called when the segmenter says that we need to read more bytes to + /// complete the current message. + Action incomplete_message() + { + if (!buf_.free()) + { + return call_immediately(STATE(alloc_for_read)); + } + return call_immediately(STATE(do_some_read)); + } + + /// Called to send a given prefix segment to the hub. + /// segmentSize_ is filled in before. + Action send_prefix() + { + // We expect either an inline call to our run() method or + // later a callback on the executor. This sequence of calls + // prepares for both of those options. + wait_and_call(STATE(send_callback)); + inlineCall_ = 1; + sendComplete_ = 0; + parent_->hub_->enqueue_send(this); // causes the callback + inlineCall_ = 0; + if (sendComplete_) + { + return send_done(); + } + return wait(); + } + + /// This is the callback state that is invoked inline by the hub. Since + /// the hub invokes this->run(), a standard StateFlow will execute + /// whatever state is current. We have set STATE(send_callback) as the + /// current state above, hence the code continues in this function. + Action send_callback() + { + auto *m = parent_->hub_->mutable_message(); + m->set_done(buf_.tail()->new_child()); + m->source_ = parent_; + // This call transfers the chained head of the current buffers, + // taking additional references where necessary or transferring the + // existing reference. It adjusts the skip_ and size_ arguments in + // buf_ to continue from where we left off. + m->buf_ = buf_.transfer_head(segmentSize_); + parent_->hub_->do_send(); + sendComplete_ = 1; + if (inlineCall_) + { + // do not disturb current state. + return wait(); + } + else + { + // we were called queued; go back to running the flow on the + // main executor. 
+ return yield_and_call(STATE(send_done)); + } + } + + Action send_done() + { + if (buf_.size()) + { + // We still have unused data in the current buffer. We have to + // segment that and send it to the hub. + return call_head_segmenter(); + } + if (buf_.free()) + { + // We still have space in the current buffer. We can read more + // data into that space. + return do_some_read(); + } + else + { + /// @todo consider not resetting here, but allowing an empty + /// but linked DataBuffer* start the next chain. + buf_.reset(); + return alloc_for_read(); + } + } + + /// Current buffer that we are filling. + LinkedDataBufferPtr buf_; + /// Barrier notifiable to keep track of the buffer's contents. + BarrierNotifiable *bufferNotifiable_; + /// Output of the last segmenter call. + ssize_t segmentSize_; + /// 1 if we got the send callback inline from the read_done. + uint16_t inlineCall_ : 1; + /// 1 if the run callback actually happened inline. + uint16_t sendComplete_ : 1; + /// Pool of BarrierNotifiables that limit the amount of inflight bytes + /// we have. + AsyncNotifiableBlock pendingLimiterPool_ { + (unsigned)config_directhub_port_max_incoming_packets()}; + /// Helper object for Select. + StateFlowSelectHelper helper_ {this}; + /// Pointer to the owninng port. + DirectHubPortSelect *parent_; + /// Implementation (and state) of the business logic that segments + /// incoming bytes into messages that shall be given to the hub. + std::unique_ptr segmenter_; + } readFlow_; + + friend class DirectHubReadFlow; + +public: + DirectHubPortSelect(DirectHubInterface *hub, int fd, + std::unique_ptr segmenter, + Notifiable *on_error = nullptr) + : StateFlowBase(hub->get_service()) + , readFlow_(this, std::move(segmenter)) + , readFlowPending_(1) + , writeFlowPending_(1) + , hub_(hub) + , fd_(fd) + , onError_(on_error) + { +#ifdef __WINNT__ + unsigned long par = 1; + ioctlsocket(fd_, FIONBIO, &par); +#else + ::fcntl(fd, F_SETFL, O_RDWR | O_NONBLOCK); +#endif + + // Sets the initial state of the write flow to the stage where we read + // the next entry from the queue. + wait_and_call(STATE(read_queue)); + notRunning_ = 1; + + hub_->register_port(this); + readFlow_.start(); + LOG(VERBOSE, "%p create fd %d", this, fd_); + } + + ~DirectHubPortSelect() + { + } + + /// Synchronous output routine called by the hub. + void send(MessageAccessor *msg) override + { + if (fd_ < 0) + { + // Port already closed. Ignore data to send. + return; + } + { + AtomicHolder h(lock()); + if (pendingTail_ && pendingTail_->buf_.try_append_from(msg->buf_)) + { + // Successfully enqueued the bytes into the tail of the queue. + // Nothing else to do here. + return; + } + } + + /// @todo we should try to collect the bytes into a buffer first before + /// enqueueing them. + BufferType *b; + mainBufferPool->alloc(&b); + b->data()->buf_.reset(msg->buf_); + if (msg->done_) + { + b->set_done(msg->done_->new_child()); + } + // Checks if we need to wake up the flow. + { + AtomicHolder h(lock()); + if (fd_ < 0) + { + // Catch race condition when port is already closed. + b->unref(); + return; + } + pendingQueue_.insert_locked(b); + totalPendingSize_ += msg->buf_.size(); + pendingTail_ = b->data(); + if (notRunning_) + { + notRunning_ = 0; + } + else + { + // flow already running. Skip notify. + return; + } + } + notify(); + } + +private: + /// Called on the main executor when a read error wants to cancel the write + /// flow. Before calling, fd_ must be -1. 
+ void shutdown() + { + HASSERT(fd_ < 0); + { + AtomicHolder h(lock()); + if (notRunning_) + { + // Queue is empty, waiting for new entries. There will be no new + // entries because fd_ < 0. + hub_->unregister_port(this, this); + wait_and_call(STATE(report_and_exit)); + } + // Else eventually we will get to check_for_new_message() which will + // flush the queue, unregister the port and exit. + } + } + + Action read_queue() + { + BufferType *head; + { + AtomicHolder h(lock()); + head = static_cast(pendingQueue_.next_locked().item); + HASSERT(head); + if (head->data() == pendingTail_) + { + pendingTail_ = nullptr; + } + } + currentHead_.reset(head); + nextToWrite_ = currentHead_->data()->buf_.head(); + nextToSkip_ = currentHead_->data()->buf_.skip(); + nextToSize_ = currentHead_->data()->buf_.size(); + return do_write(); + } + + Action do_write() + { + if (fd_ < 0) + { + // fd closed. Drop data to the floor. + totalPendingSize_ -= nextToSize_; + return check_for_new_message(); + } + uint8_t *data; + unsigned len; + nextToWrite_ = nextToWrite_->get_read_pointer(nextToSkip_, &data, &len); + if (len > nextToSize_) + { + len = nextToSize_; + } + nextToSkip_ = 0; + nextToSize_ -= len; + totalPendingSize_ -= len; + totalWritten_ += len; + LOG(VERBOSE, "write %u total %zu", (unsigned)len, totalWritten_); + return write_repeated( + &selectHelper_, fd_, data, len, STATE(write_done)); + } + + Action write_done() + { + if (selectHelper_.hasError_) + { + LOG(INFO, "%p: Error writing to fd %d: (%d) %s", this, fd_, errno, + strerror(errno)); + // will close fd and notify the reader flow to exit. + report_write_error(); + // Flushes the queue of messages. fd_ == -1 now so no write will be + // attempted. + return check_for_new_message(); + } + if (nextToSize_) + { + return do_write(); + } + return check_for_new_message(); + } + + Action check_for_new_message() + { + currentHead_.reset(); + AtomicHolder h(lock()); + if (pendingQueue_.empty()) + { + if (fd_ < 0) + { + // unregisters the port. All the queue has been flushed now. + hub_->unregister_port(this, this); + return wait_and_call(STATE(report_and_exit)); + } + notRunning_ = 1; + return wait_and_call(STATE(read_queue)); + } + else + { + return call_immediately(STATE(read_queue)); + } + } + + /// Terminates the flow, reporting to the barrier. + Action report_and_exit() + { + set_terminated(); + currentHead_.reset(); + write_flow_exit(); + return wait(); + } + + /// Called by the write flow when it sees an error. Called on the main + /// executor. The assumption here is that the write flow still has entries + /// in its queue that need to be removed. Closes the socket, and notifies + /// the read flow to exit. Does not typically delete this, because the write + /// flow needs to exit separately. + void report_write_error() + { + int close_fd = -1; + { + AtomicHolder h(lock()); + if (fd_ >= 0) + { + std::swap(fd_, close_fd); + } + } + if (close_fd >= 0) + { + ::close(close_fd); + } + readFlow_.read_shutdown(); + } + + /// Callback from the ReadFlow when the read call has seen an error. The + /// read flow is assumed to be exited. Takes the read entry out of the + /// barrier, notifies the write flow to stop and possibly deletes *this. + /// Called on the main executor. 
+ void report_read_error() + { + int close_fd = -1; + { + AtomicHolder h(lock()); + if (fd_ >= 0) + { + std::swap(fd_, close_fd); + } + } + if (close_fd >= 0) + { + ::close(close_fd); + } + // take read barrier + read_flow_exit(); + // kill write flow + shutdown(); + } + + /// Callback from the read flow that it has exited. This is triggered after + /// the shutdown() call. May delete this. + void read_flow_exit() + { + LOG(VERBOSE, "%p exit read", this); + flow_exit(true); + } + + /// Marks the write flow as exited. May delete this. + void write_flow_exit() + { + LOG(VERBOSE, "%p exit write", this); + flow_exit(false); + } + + /// Marks a flow to be exited, and once both are exited, notifies done and + /// deletes this. + /// @param read if true, marks the read flow done, if false, marks the write + /// flow done. + void flow_exit(bool read) + { + bool del = false; + { + AtomicHolder h(lock()); + if (read) + { + readFlowPending_ = 0; + } + else + { + writeFlowPending_ = 0; + } + if (writeFlowPending_ == 0 && readFlowPending_ == 0) + { + del = true; + } + } + if (del) + { + if (onError_) + { + onError_->notify(); + } + delete this; + } + } + + /// @return lock usable for the write flow and the port altogether. + Atomic *lock() + { + return pendingQueue_.lock(); + } + + /// Holds the necessary information we need to keep in the queue about a + /// single output entry. Automatically unrefs the buffer whose pointer we + /// are holding when released. + struct OutputDataEntry + { + LinkedDataBufferPtr buf_; + }; + + friend class DirectHubReadFlow; + + /// Type of buffers we are enqueuing for output. + typedef Buffer BufferType; + /// Type of the queue used to keep the output buffer queue. + typedef Q QueueType; + + /// total number of bytes written to the port. + size_t totalWritten_ {0}; + + /// The buffer that is taken out of the queue while flushing. + BufferPtr currentHead_; + /// Data we are currently writing to a buffer. + DataBuffer *nextToWrite_; + /// Skip_ parameter matching nextToWrite_; + unsigned nextToSkip_; + /// Size_ parameter matching nextToWrite_; + unsigned nextToSize_; + /// Helper object for performing asynchronous writes. + StateFlowSelectHelper selectHelper_ {this}; + /// Time when the last buffer flush has happened. Not used yet. + // long long lastWriteTimeNsec_ = 0; + + /// Contains buffers of OutputDataEntries to write. + QueueType pendingQueue_; + /// Last tail pointer in the pendingQueue. If queue is empty, + /// nullptr. Protected by pendingQueue_.lock(). + OutputDataEntry *pendingTail_ = nullptr; + /// Total numberof bytes in the pendingQueue. + size_t totalPendingSize_ = 0; + /// 1 if the state flow is paused, waiting for the notification. + uint8_t notRunning_ : 1; + /// 1 if the read flow is still running. + uint8_t readFlowPending_; + /// 1 if the write flow is still running. + uint8_t writeFlowPending_; + /// Parent hub where output data is coming from. + DirectHubInterface *hub_; + /// File descriptor for input/output. + int fd_; + /// This notifiable will be called before exiting. + Notifiable *onError_ = nullptr; +}; + +extern DirectHubPortSelect *g_last_direct_hub_port; +DirectHubPortSelect *g_last_direct_hub_port = nullptr; + +void create_port_for_fd(DirectHubInterface *hub, int fd, + std::unique_ptr segmenter, Notifiable *on_error) +{ + g_last_direct_hub_port = + new DirectHubPortSelect(hub, fd, std::move(segmenter), on_error); +} + +class DirectGcTcpHub +{ +public: + /// Constructor. 
+    ///
+    /// @param gc_hub Which GridConnect hub should we attach the TCP listener
+    /// onto.
+    /// @param port TCP port number to listen on.
+    DirectGcTcpHub(DirectHubInterface *gc_hub, int port);
+    ~DirectGcTcpHub();
+
+    /// @return true if the listener is ready to accept incoming connections.
+    bool is_started()
+    {
+        return tcpListener_.is_started();
+    }
+
+private:
+    /// Callback when a new connection arrives.
+    ///
+    /// @param fd filedes of the freshly established incoming connection.
+    ///
+    void OnNewConnection(int fd);
+
+    /// Direct GridConnect hub.
+    DirectHubInterface *gcHub_;
+    /// Helper object representing the listening on the socket.
+    SocketListener tcpListener_;
+};
+
+void DirectGcTcpHub::OnNewConnection(int fd)
+{
+#if 0
+    uint32_t rcvbuf;
+    socklen_t len = sizeof(rcvbuf);
+    int ret = getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, &len);
+    if (ret >= 0)
+    {
+        LOG(ALWAYS, "Socket rcvbuf %u", (unsigned)rcvbuf);
+    }
+#endif
+    create_port_for_fd(gcHub_, fd,
+        std::unique_ptr(create_gc_message_segmenter()));
+}
+
+DirectGcTcpHub::DirectGcTcpHub(DirectHubInterface *gc_hub, int port)
+    : gcHub_(gc_hub)
+    , tcpListener_(port,
+          std::bind(
+              &DirectGcTcpHub::OnNewConnection, this, std::placeholders::_1))
+{
+}
+
+DirectGcTcpHub::~DirectGcTcpHub()
+{
+    tcpListener_.shutdown();
+}
+
+void create_direct_gc_tcp_hub(DirectHubInterface *hub, int port)
+{
+    new DirectGcTcpHub(hub, port);
+}
diff --git a/src/utils/DirectHub.cxxtest b/src/utils/DirectHub.cxxtest
new file mode 100644
index 000000000..f42fc0efd
--- /dev/null
+++ b/src/utils/DirectHub.cxxtest
@@ -0,0 +1,1080 @@
+#include "utils/DirectHub.hxx"
+
+#include
+#include
+
+#include "executor/StateFlow.hxx"
+#include "nmranet_config.h"
+#include "utils/FdUtils.hxx"
+#include "utils/Hub.hxx"
+#include "utils/gc_format.h"
+#include "utils/test_main.hxx"
+
+DataBufferPool pool_64(64);
+
+Executor<1> g_read_executor("read_thread", 0, 1024);
+Service g_read_service(&g_read_executor);
+OVERRIDE_CONST_TRUE(gc_generate_newlines);
+extern DataBufferPool g_direct_hub_data_pool;
+
+TEST_CONST(directhub_port_max_incoming_packets, 2);
+
+/// This state flow reads all available bytes from an fd and passes them to a
+/// callback.
+class ReadAllFromFd : public StateFlowBase
+{
+public:
+    ReadAllFromFd()
+        : StateFlowBase(&g_read_service)
+    {
+    }
+
+    ~ReadAllFromFd()
+    {
+        stop();
+    }
+
+    /// Start reading all bytes from an fd.
+    /// @param fd the file descriptor of the socket.
+    /// @param cb will be called on a different thread for all data that was
+    /// read.
+    void start(int fd, std::function cb)
+    {
+        ERRNOCHECK("dup", fd_ = dup(fd));
+        ERRNOCHECK("fcntl", ::fcntl(fd_, F_SETFL, O_RDWR | O_NONBLOCK));
+
+        cb_ = std::move(cb);
+        isRunning_ = true;
+        start_flow(STATE(read_some));
+    }
+
+    /// Stops reading. Takes about 10 msec to run.
+    void stop()
+    {
+        if (isRunning_)
+        {
+            doExit_ = true;
+            exitNotify_.wait_for_notification();
+        }
+    }
+
+private:
+    Action read_some()
+    {
+        return read_repeated_with_timeout(&helper_, MSEC_TO_NSEC(10), fd_,
+            readBuf_, sizeof(readBuf_), STATE(read_done));
+    }
+
+    Action read_done()
+    {
+        size_t arrived = sizeof(readBuf_) - helper_.remaining_;
+        if (doExit_)
+        {
+            isRunning_ = false;
+            ::close(fd_);
+            exitNotify_.notify();
+            return exit();
+        }
+        else if (arrived > 0)
+        {
+            cb_(readBuf_, arrived);
+        }
+        return call_immediately(STATE(read_some));
+    }
+
+    /// Helper for stateflow reads.
+    StateFlowTimedSelectHelper helper_ {this};
+    /// Buffer to read data into.
+    uint8_t readBuf_[1000];
+    /// File descriptor to read from.
+    int fd_ = -1;
+    /// Requests the flow to exit.
+ bool doExit_ = false; + /// True if the flow was ever started and not yet stopped. + bool isRunning_ = false; + /// This function will be called with all incoming data. + std::function cb_; + /// Used to synchronize the exit of this flow. + SyncNotifiable exitNotify_; +}; + +/// Helper class to send message to the hub directly (i.e., not via a port). +class SendSomeData : public Executable +{ +public: + SendSomeData(DirectHubInterface *hub, const std::string &data, + size_t skip = 0) + : hub_(hub) + { + pool_64.alloc(&bufHead_); + bufHead_->set_done(&bn1_); + buf_.reset(bufHead_); + memcpy(buf_.data_write_pointer(), data.data(), data.size()); + buf_.data_write_advance(data.size()); + if (skip) + { + buf_.transfer_head(skip); + } + } + + ~SendSomeData() + { + EXPECT_TRUE(is_done()); + } + + /// Triggers the send. + void enqueue() + { + hub_->enqueue_send(this); + } + + /// Callback from the hub that actually does the send. + void run() override + { + isRunning_.post(); + hasSeenRun_ = false; + sem_.wait(); + hub_->mutable_message()->done_ = &bn2_; + hub_->mutable_message()->buf_ = buf_.transfer_head(buf_.size()); + buf_.reset(); + hub_->do_send(); + } + + bool is_done() + { + return bn1_.is_done() && bn2_.is_done() && + (bufHead_->references() == 0); + } + + DirectHubInterface *hub_; + BarrierNotifiable bn1_ {EmptyNotifiable::DefaultInstance()}; + BarrierNotifiable bn2_ {EmptyNotifiable::DefaultInstance()}; + DataBuffer *bufHead_; + LinkedDataBufferPtr buf_; + size_t len_; + size_t skip_; + /// We take one share of this semaphore when sending. Owners can go from + /// blocking to nonblocking sending this way. Default is one share. + OSSem sem_ {1}; + /// True if the Hub has already executed the callback, which is equivalent + /// to the message actually having been sent to the hub. + bool hasSeenRun_ {false}; + /// This semaphore is notified when we are blocked. + OSSem isRunning_ {0}; +}; + +/// Class that implements the Segmenter interface, and performs expectation on +/// what data arrives and that the calls are made according to the API +/// contract. Segments by | characters. +class TestSegmenter : public MessageSegmenter +{ +public: + /// @param expected_data is all the bytes that will be sent from the remote + /// endpoint. + TestSegmenter(std::shared_ptr expected_data) + : data_(expected_data) + { + } + + ssize_t segment_message(const void *data, size_t size) override + { + LOG(VERBOSE, "segment: ofs %u size %u", (unsigned)nextToReceive_, + (unsigned)size); + if (needClear_) + { + EXPECT_TRUE(isClear_) << "Failed to call clear()."; + needClear_ = false; + } + else + { + EXPECT_FALSE(isClear_) << "Unexpected call to clear()."; + } + isClear_ = false; + std::string current((char *)data, size); + EXPECT_EQ(data_->substr(nextToReceive_, size), current) + << "Wrong data passed to segmenter."; + if (current.find('|') != string::npos) + { + // have a segment end. + size_t end_ofs = data_->find('|', packetStart_); + HASSERT(end_ofs != string::npos); + ssize_t ret = end_ofs - packetStart_ + 1; + packetStart_ = end_ofs + 1; + nextToReceive_ = packetStart_; + needClear_ = true; + LOG(VERBOSE, "segment: chunk %u", (unsigned)ret); + return ret; + } + nextToReceive_ += size; + return 0; + } + + /// Resets internal state machine. The next call to segment_message() + /// assumes no previous data present. + void clear() override + { + isClear_ = true; + } + +private: + /// What data is being sent. + std::shared_ptr data_; + /// Offset in the data_ array for the first byte of the current packet. 
+ size_t packetStart_ = 0; + /// Offset in the data_ array for the next byte we have not seen yet. + size_t nextToReceive_ = 0; + /// True if clear was called. + bool isClear_ = true; + /// True if clear needs to be called next. + bool needClear_ = true; +}; + +/// Class that implements the HubPort interface (downstream), and performs +/// expectation on what data arrives and that the calls are made according to +/// the API contract. Segments by | characters. +class TestReceiver : public DirectHubPort +{ +public: + TestReceiver(std::shared_ptr expected_data) + : data_(expected_data) + { + } + + void send(MessageAccessor *msg) override + { + ASSERT_TRUE(msg); + + // Assembles the real bytes that came into a single string. + string recvd; + msg->buf_.append_to(&recvd); + + // Expectations that we got exactly one segmented packet. + EXPECT_EQ(msg->buf_.size(), recvd.size()); + EXPECT_EQ(data_->substr(packetStart_, msg->buf_.size()), recvd) + << "Wrong data arrived as part of a packet."; + size_t next_div = data_->find('|', packetStart_) + 1; + EXPECT_EQ(next_div, packetStart_ + msg->buf_.size()) + << "Packet end unexpected."; + packetStart_ = next_div; + } + +private: + /// What data is being sent. + std::shared_ptr data_; + /// Offset in the data_ array for the first byte of the next packet. + size_t packetStart_ = 0; +}; + +/// Hub port for a legacy CAN-bus hub that allows tests to put expectations on +/// what is happening. +class TestLegacyCanReceiver : public CanHubPortInterface, private Atomic +{ +public: + TestLegacyCanReceiver(CanHubFlow *hub) + : hub_(hub) + { + hub_->register_port(this); + } + + ~TestLegacyCanReceiver() + { + hub_->unregister_port(this); + } + + /// Implements receiving data from the bus. + void send(Buffer *buf, unsigned) override + { + AtomicHolder h(this); + lastFrame_ = *buf->data(); + ++frameCount_; + if (blockPackets_) + { + blockedPackets_.emplace_back(buf); + } + else + { + buf->unref(); + } + } + + /// Inject a new CAN frame to the CAN-bus. + /// @param gc_frame gridconnect format frame. + void inject_frame(const string &gc_frame) + { + auto b = get_buffer_deleter(hub_->alloc()); + ASSERT_EQ(0, gc_format_parse(gc_frame.c_str(), b->data())); + b->data()->skipMember_ = this; + hub_->send(b.release()); + } + + /// @return number of CAN frames seen. + uint32_t count() + { + AtomicHolder h(this); + return frameCount_; + } + + /// @return last seen CAN frame + struct can_frame frame() + { + struct can_frame ret; + { + AtomicHolder h(this); + ret = lastFrame_; + } + return ret; + } + + /// Unblocks all pending packets. + void clear_blocked() + { + blockedPackets_.clear(); + } + + /// Set to true to stop acknowledging the incoming packets (from the hub). + bool blockPackets_ = false; + +private: + /// If packets are blocked, they end up here so that they get released upon + /// destruction. + std::vector > blockedPackets_; + /// Parent hub. + CanHubFlow *hub_; + /// Stores a copy of the last frame this port has seen. + struct can_frame lastFrame_; + /// Total number of frames this port has seen. + uint32_t frameCount_ {0}; +}; + +class DirectHubTest : public ::testing::Test +{ +protected: + DirectHubTest() + { +#if OPENMRN_FEATURE_BSD_SOCKETS_IGNORE_SIGPIPE + // We expect write failures to occur but we want to handle them where + // the error occurs rather than in a SIGPIPE handler. 
+ signal(SIGPIPE, SIG_IGN); +#endif // OPENMRN_FEATURE_BSD_SOCKETS_IGNORE_SIGPIPE + + /// re-creates main buffer pool to have good expectations on the total + /// size of memory allocated. + delete mainBufferPool; + mainBufferPool = nullptr; + mainBufferPool = new DynamicPool(Bucket::init(0)); + // init_main_buffer_pool(); + } + + ~DirectHubTest() + { + if (fdOne_ >= 0) + { + flush_data(fdOne_); + ::close(fdOne_); + fdOne_ = -1; + } + if (fdTwo_ >= 0) + { + flush_data(fdTwo_); + ::close(fdTwo_); + fdTwo_ = -1; + } + wait_for_main_executor(); + wait_for_main_executor(); + bn_.notify(); // resolve current barrier. + exitNotify_.wait_for_notification(); + } + + /// Creates two hub ports via socketpair and registers them to the data + /// hub. Saves the other endpoints to fdOne_ and fdTwo_. + void create_two_ports() + { + fdOne_ = create_port(); + fdTwo_ = create_port(); + } + + /// Creates a hub port via socketpair and registers it to the data + /// hub. + /// @return the other endpoint fd. + int create_port() + { + int fd[2]; + ERRNOCHECK("socketpair", socketpair(AF_UNIX, SOCK_STREAM, 0, fd)); + + int buflen = 1700; + socklen_t optlen = sizeof(buflen); + ERRNOCHECK("setsockopt", + setsockopt(fd[0], SOL_SOCKET, SO_SNDBUF, &buflen, optlen)); + ERRNOCHECK("setsockopt", + setsockopt(fd[1], SOL_SOCKET, SO_SNDBUF, &buflen, optlen)); + + create_port_for_fd( + hub_.get(), fd[0], get_new_segmenter(), bn_.new_child()); + + portFds_.push_back(fd[0]); + + wait_for_main_executor(); + return fd[1]; + } + + /// Creates a pipe to send test-segmented (random) messages. Adds the test + /// segmenter to this port. + /// @param data the sequence of bytes that will come through this port. + void create_test_segmenter_port(std::shared_ptr data) + { + int fd[2]; + ERRNOCHECK("socketpair", socketpair(AF_UNIX, SOCK_STREAM, 0, fd)); + + int buflen = 1700; + socklen_t optlen = sizeof(buflen); + ERRNOCHECK("setsockopt", + setsockopt(fd[0], SOL_SOCKET, SO_SNDBUF, &buflen, optlen)); + ERRNOCHECK("setsockopt", + setsockopt(fd[1], SOL_SOCKET, SO_SNDBUF, &buflen, optlen)); + + create_port_for_fd(hub_.get(), fd[0], + std::unique_ptr(new TestSegmenter(data))); + fdOne_ = fd[1]; + wait_for_main_executor(); + } + + /// Creates a new hub port that will make expectations according to the + /// test-segmented messages. + /// @param data the sequence of bytes that will be sent to the hub. + void create_test_receiver_port(std::shared_ptr data) + { + receiver_.reset(new TestReceiver(data)); + hub_->register_port(receiver_.get()); + } + + std::unique_ptr get_new_segmenter() + { + if (useTrivialSegmenter_) + { + return std::unique_ptr( + create_trivial_message_segmenter()); + } + else + { + return std::unique_ptr( + create_gc_message_segmenter()); + } + } + + /// Prints a log message with queue statistics about an fd (send/receive + /// queue and buffers). 
+ void print_queue_stats(int fd, const char *where) + { + int sndbuflen = -1; + int rcvbuflen = -1; + int sndqlen = -1; + int rcvqlen = -1; + socklen_t optlen = sizeof(int); + ERRNOCHECK("getsockopt", + ::getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuflen, &optlen)); + ERRNOCHECK("getsockopt", + ::getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuflen, &optlen)); + ERRNOCHECK("ioctl", ::ioctl(fd, SIOCINQ, &rcvqlen)); + ERRNOCHECK("ioctl", ::ioctl(fd, SIOCOUTQ, &sndqlen)); + LOG(INFO, "Q stats for fd %d (%s):|outbuf %d|outq %d|inbuf %d|inq %d|", + fd, where, sndbuflen, sndqlen, rcvbuflen, rcvqlen); + } + + /// Reads whatever data is available on fd (up to 1000 bytes) and returns + /// it as a string. + /// @param fd a readable file descriptor + /// @return data read. + string read_some(int fd) + { + char buf[1000]; + int ret = ::read(fd, buf, sizeof(buf)); + HASSERT(ret >= 0); + return string(buf, ret); + } + + ssize_t write_some(int fd) + { + string gc_packet(":X195B4111N0102030405060708;\n"); + string total; + while (total.size() < 900) + { + total += gc_packet; + } + return ::write(fd, total.data(), total.size()); + } + + /// Writes a lot of data into an fd, also being prepared for the case that + /// it gets blocked. + /// @return number of total bytes written. + size_t write_a_lot(int fd) + { + ERRNOCHECK("fcntl", ::fcntl(fd, F_SETFL, O_RDWR | O_NONBLOCK)); + size_t total = 0; + for (int i = 0; i < 200; ++i) + { + int wr = write_some(fdOne_); + if (wr >= 0) + { + total += wr; + } + wr = write_some(fdOne_); + if (wr >= 0) + { + total += wr; + } + usleep(100); + } + return total; + } + + static unsigned flush_data(int fd) + { + ::fcntl(fd, F_SETFL, O_RDWR | O_NONBLOCK); + int ret; + unsigned total = 0; + + do + { + usleep(100); + char buf[1000]; + ret = ::read(fd, buf, sizeof(buf)); + if (ret > 0) + { + total += ret; + } + } while (ret > 0); + return total; + } + + /// Generates a large random string to send to the input. The format of + /// the string is 1-2-3-4|5-6-|. Numbers are sequential, length of packets + /// segmented by | are random. + /// @param len total number of bytes to generate. + /// @param pmin the minimum length of a packet + /// @param pmax the maximum length of a packet + string generate_random_data( + size_t len, unsigned pmin = 15, unsigned pmax = 150) + { + string ret; + ret.reserve(len); + int ctr = 0; + while (ret.size() < len) + { + size_t random_plen = random(pmin, pmax); + unsigned packetlen = std::min(len - ret.size(), random_plen); + string packet; + while (packet.size() < packetlen) + { + packet += StringPrintf("%d-", ctr++); + } + packet.resize(packetlen - 1); + packet.push_back('|'); + ret += packet; + } + return ret; + } + + /// @return a random integer in an interval [pmin, pmax]. Deterministic in + /// each test run. + /// @param pmin minimum of intarval + /// @param pmax maximum of interval + unsigned random(unsigned pmin, unsigned pmax) + { + return (rand_r(&randomSeed_) % (pmax - pmin + 1)) + pmin; + } + + std::unique_ptr receiver_; + std::unique_ptr> hub_ { + create_hub(&g_executor)}; + + /// Remote endpoint of the first port. + int fdOne_ = -1; + /// Remote endpoint of the second port. + int fdTwo_ = -1; + /// Fds of the internal parts of the ports. + vector portFds_; + /// If true, uses a trivial segmenter for input, if false, a GcSegmenter. + bool useTrivialSegmenter_ = true; + /// Helper flow to drain bytes from a port. + ReadAllFromFd fdReaderFlow_; + /// Deterministic random seed for repeatable tests. 
+ unsigned int randomSeed_ = 994433227; + + /// Hub flow for legacy CAN packets (compatible with openlcb::IfCan). + CanHubFlow legacyHub_ {&g_service}; + /// Fake entry in the legacy hub that receives and injects CAN frames. + TestLegacyCanReceiver legacyReceiver_ {&legacyHub_}; + + /// Exit notifiable -- when all ports are done. + SyncNotifiable exitNotify_; + /// This notify will have a child given to each port. + BarrierNotifiable bn_ {&exitNotify_}; +}; + +/// Sends some data from one remote socket through the hub to another remote +/// socket. +TEST_F(DirectHubTest, end_to_end_data) +{ + create_two_ports(); + + ASSERT_EQ(6, ::write(fdOne_, "abcdef", 6)); + usleep(10000); + + EXPECT_EQ("abcdef", read_some(fdTwo_)); + + ASSERT_EQ(1, ::write(fdOne_, "x", 1)); + usleep(10000); + ASSERT_EQ(1, ::write(fdOne_, "y", 1)); + usleep(10000); + ASSERT_EQ(1, ::write(fdOne_, "z", 1)); + usleep(10000); + ASSERT_EQ(1, ::write(fdOne_, "w", 1)); + usleep(10000); + + EXPECT_EQ("xyzw", read_some(fdTwo_)); +} + +/// Checks that the done notifiables are called when sending to an empty hub. +TEST_F(DirectHubTest, notifies_messages_empty) +{ + BarrierNotifiable bn1(EmptyNotifiable::DefaultInstance()); + BarrierNotifiable bn2(EmptyNotifiable::DefaultInstance()); + DataBuffer *buf; + pool_64.alloc(&buf); + EXPECT_EQ(1u, buf->references()); + buf->set_done(&bn1); + bool complete = false; + hub_->enqueue_send(new CallbackExecutable([this, buf, &bn2, &complete]() { + hub_->mutable_message()->done_ = &bn2; + hub_->mutable_message()->buf_.reset(buf, 0, 10); + hub_->do_send(); + complete = true; + })); + EXPECT_TRUE(complete); + EXPECT_TRUE(bn1.is_done()); + EXPECT_TRUE(bn2.is_done()); + EXPECT_EQ(0u, buf->references()); +} + +/// Transfers a lot of data through the hub from one remote socket to another. +TEST_F(DirectHubTest, large_end_to_end_data) +{ + create_two_ports(); + + size_t bytes = 0; + fdReaderFlow_.start( + fdTwo_, [&bytes](uint8_t *, size_t len) { bytes += len; }); + for (int i = 0; i < 100; i++) + { + write_some(fdOne_); + } + usleep(40000); + LOG(INFO, "total bytes transferred: %zu", bytes); + EXPECT_EQ(92800u, bytes); + fdReaderFlow_.stop(); +} + +/// Verifies that the on_exit notifiable is called when an error on the socket +/// causes the port to be closed. 
+TEST_F(DirectHubTest, close_notify) +{ + int fd[2]; + int ffd[2]; + ERRNOCHECK("socketpair", socketpair(AF_UNIX, SOCK_STREAM, 0, fd)); + ERRNOCHECK("socketpair", socketpair(AF_UNIX, SOCK_STREAM, 0, ffd)); + LOG(INFO, "socket %d %d", fd[0], fd[1]); + SyncNotifiable n; + create_port_for_fd(hub_.get(), fd[0], + std::unique_ptr(create_trivial_message_segmenter()), + &n); + create_port_for_fd(hub_.get(), ffd[0], + std::unique_ptr(create_trivial_message_segmenter())); + write_some(fd[1]); + + read_some(ffd[1]); + ::close(fd[1]); + write_some(ffd[1]); + usleep(10000); + n.wait_for_notification(); + + ::close(ffd[1]); +} + +TEST_F(DirectHubTest, random_deterministic) +{ + string d = generate_random_data(35, 3, 9); + EXPECT_EQ(35u, d.size()); + EXPECT_EQ("0-1-2-3|4-5-6-|8-9-1|11-12|13-14-1|", d); +} + +TEST_F(DirectHubTest, segmenter_small) +{ + std::shared_ptr d = + std::make_shared(generate_random_data(35, 3, 9)); + create_test_segmenter_port(d); + create_test_receiver_port(d); + for (size_t ofs = 0; ofs < d->size();) + { + size_t len = random(10, 80); + len = std::min(len, d->size() - ofs); + ::write(fdOne_, d->data() + ofs, len); + ofs += len; + wait_for_main_executor(); + usleep(100); + wait_for_main_executor(); + } +} + +TEST_F(DirectHubTest, segmenter_long) +{ + std::shared_ptr d = + std::make_shared(generate_random_data(20000, 3, 9)); + create_test_segmenter_port(d); + create_test_receiver_port(d); + for (size_t ofs = 0; ofs < d->size();) + { + size_t len = random(10, 80); + len = std::min(len, d->size() - ofs); + ::write(fdOne_, d->data() + ofs, len); + ofs += len; + wait_for_main_executor(); + usleep(100); + wait_for_main_executor(); + } +} + +TEST_F(DirectHubTest, segmenter_largepkt) +{ + std::shared_ptr d = + std::make_shared(generate_random_data(20000, 30, 130)); + create_test_segmenter_port(d); + create_test_receiver_port(d); + for (size_t ofs = 0; ofs < d->size();) + { + size_t len = random(10, 80); + len = std::min(len, d->size() - ofs); + ::write(fdOne_, d->data() + ofs, len); + ofs += len; + wait_for_main_executor(); + usleep(100); + wait_for_main_executor(); + } +} + +TEST_F(DirectHubTest, segmenter_gridconnect_sized) +{ + std::shared_ptr d = + std::make_shared(generate_random_data(20000, 26, 30)); + create_test_segmenter_port(d); + create_test_receiver_port(d); + for (size_t ofs = 0; ofs < d->size();) + { + size_t len = random(300, 1000); + len = std::min(len, d->size() - ofs); + ::write(fdOne_, d->data() + ofs, len); + ofs += len; + wait_for_main_executor(); + usleep(100); + wait_for_main_executor(); + } +} + +/// In this test we try to write a lot of data into one port while not reading +/// anything from the other. This situation should push back on the sending +/// port after some limited amount of intermediate buffers are filled. +TEST_F(DirectHubTest, socket_blocked) +{ + // This test was designed for a smaller amount of data bytes read in. 
+ ScopedOverride ov2(g_direct_hub_data_pool.payload_size_override(), 64); + TEST_OVERRIDE_CONST(directhub_port_max_incoming_packets, 10); + + create_two_ports(); + + int buflen; + socklen_t optlen = sizeof(buflen); + ERRNOCHECK("getsockopt", + getsockopt(fdOne_, SOL_SOCKET, SO_SNDBUF, &buflen, &optlen)); + LOG(INFO, "snd buf %d", buflen); + + size_t total = write_a_lot(fdOne_); + LOG(INFO, "total %u pool %u", (unsigned)total, + (unsigned)mainBufferPool->total_size()); + print_queue_stats(fdOne_, "send remote"); + print_queue_stats(portFds_[0], "send local"); + print_queue_stats(fdTwo_, "blocked remote"); + print_queue_stats(portFds_[1], "blocked local"); + + // The total bytes buffered should not be very much, despite having tried + // to write a lot of data into the socket. + EXPECT_GT(5000u, total); + // Memory usage is also under control. + EXPECT_GT(5000u, mainBufferPool->total_size()); + + // We should survive the case when a blocked socket dies. + ::close(fdTwo_); + wait_for_main_executor(); + + total += write_a_lot(fdOne_); + + LOG(INFO, "Total %u", (unsigned)total); + EXPECT_LT(50000u, total); + // Memory usage is also under control. + EXPECT_GT(5000u, mainBufferPool->total_size()); +} + +/// In this test we try to write a lot of data into one port while not reading +/// anything from the other. This situation should push back on the sending +/// port after some limited amount of intermediate buffers are filled. +TEST_F(DirectHubTest, socket_blocked_gc) +{ + // This test was designed for a smaller amount of data bytes read in. + ScopedOverride ov2(g_direct_hub_data_pool.payload_size_override(), 64); + TEST_OVERRIDE_CONST(directhub_port_max_incoming_packets, 10); + + useTrivialSegmenter_ = false; + create_two_ports(); + + int buflen; + socklen_t optlen = sizeof(buflen); + ERRNOCHECK("getsockopt", + getsockopt(fdOne_, SOL_SOCKET, SO_SNDBUF, &buflen, &optlen)); + LOG(INFO, "snd buf %d", buflen); + + size_t total = write_a_lot(fdOne_); + LOG(INFO, "total %u pool %u", (unsigned)total, + (unsigned)mainBufferPool->total_size()); + // The total bytes buffered should not be very much, despite having tried + // to write a lot of data into the socket. + EXPECT_GT(5000u, total); + // Memory usage is also under control. + EXPECT_GT(5000u, mainBufferPool->total_size()); + + while (total > 0) + { + total -= flush_data(fdTwo_); + } +} + +/// In this test we try to write a lot of data into one port while not reading +/// anything from the other. This situation should push back on the sending +/// port after some limited amount of intermediate buffers are filled. +/// +/// This test uses the default buffer sizes. +TEST_F(DirectHubTest, socket_blocked_gc_largebuf) +{ + delete mainBufferPool; + mainBufferPool = nullptr; + // Makes sure that 1460-byte buffers are kept around in the freelist. Then + // pool->total_size() will count the largest number of 1460-byte sized + // packets concurrently allocated. + mainBufferPool = new DynamicPool(Bucket::init(96, 1520, 0)); + + useTrivialSegmenter_ = false; + create_two_ports(); + + size_t total = write_a_lot(fdOne_); + LOG(INFO, "total %u pool %u", (unsigned)total, + (unsigned)mainBufferPool->total_size()); + // We don't make an expectation on how much data was written into the + // socket, because that depends on how much the kernel is willing to take + // into the blocked output queue. + // EXPECT_GT(5000u, total); + + // Memory usage is also under control. This is peak memory usage, because + // the buffer pool has buckets larger than a TCP packet size. 
+ EXPECT_GT(6000u, mainBufferPool->total_size()); + + while (total > 0) + { + total -= flush_data(fdTwo_); + } +} + +/// Proxies data to two remote sockets from a locally injected source. Checks +/// that done notifiables are called and data arrives correctly. +TEST_F(DirectHubTest, local_source_two_targets) +{ + create_two_ports(); + SendSomeData d(hub_.get(), "abcde"); + d.enqueue(); + wait_for_main_executor(); + EXPECT_TRUE(d.is_done()); + usleep(2000); + string rda = read_some(fdOne_); + string rdb = read_some(fdTwo_); + EXPECT_EQ("abcde", rda); + EXPECT_EQ("abcde", rdb); +} + +/// Simulates a race condition between two threads sending. +TEST_F(DirectHubTest, race_condition) +{ + create_two_ports(); + SendSomeData d(hub_.get(), "a"); + d.sem_.wait(); // makes it blocking. + g_read_executor.add(new CallbackExecutable([&d]() { d.enqueue(); })); + d.isRunning_.wait(); // blocked indeed. + + SendSomeData d2(hub_.get(), "b"); + d2.enqueue(); + SendSomeData d3(hub_.get(), "c"); + d3.enqueue(); + SendSomeData d4(hub_.get(), "d"); + d4.enqueue(); + wait_for_main_executor(); + EXPECT_FALSE(d2.hasSeenRun_); + EXPECT_FALSE(d2.is_done()); + + d.sem_.post(); // unblock + d4.isRunning_.wait(); // should get all the way to d4. + wait_for_main_executor(); + + EXPECT_TRUE(d.is_done()); + EXPECT_TRUE(d2.is_done()); + EXPECT_TRUE(d3.is_done()); + EXPECT_TRUE(d4.is_done()); + usleep(2000); + string rda = read_some(fdOne_); + string rdb = read_some(fdTwo_); + EXPECT_EQ("abcd", rda); + EXPECT_EQ("abcd", rdb); +} + +/// Tests that skip_ is correctly handled. +TEST_F(DirectHubTest, check_skip) +{ + create_two_ports(); + SendSomeData d(hub_.get(), "abc"); + d.enqueue(); + SendSomeData d2(hub_.get(), "xyzde", 3); + d2.enqueue(); + wait_for_main_executor(); + EXPECT_TRUE(d2.is_done()); + usleep(2000); + string rda = read_some(fdOne_); + string rdb = read_some(fdTwo_); + EXPECT_EQ("abcde", rda); + EXPECT_EQ("abcde", rdb); +} + +/// Tests that skipping and chaining is correctly handled. 
+TEST_F(DirectHubTest, check_size_skip_chain) +{ + create_two_ports(); + BarrierNotifiable bn2(EmptyNotifiable::DefaultInstance()); + DataBuffer *buf1; + DataBuffer *buf2; + pool_64.alloc(&buf1); + pool_64.alloc(&buf2); + buf2->set_done(&bn2); + ASSERT_EQ(64u, buf1->size()); + buf1->set_size(13); + EXPECT_EQ(13u, buf1->size()); + unsigned skip = buf1->size() - 3; + memcpy(buf1->data() + skip, "abc", 3); + buf1->set_next(buf2); + memcpy(buf2->data(), "de", 2); + + hub_->enqueue_send(new CallbackExecutable([this, skip, buf1]() { + hub_->mutable_message()->buf_.reset(buf1, skip, 5); + hub_->do_send(); + })); + wait_for_main_executor(); + EXPECT_EQ(0u, buf1->references()); + EXPECT_EQ(0u, buf2->references()); + EXPECT_TRUE(bn2.is_done()); + usleep(2000); + string rda = read_some(fdOne_); + string rdb = read_some(fdTwo_); + EXPECT_EQ("abcde", rda); + EXPECT_EQ("abcde", rdb); +} + +TEST_F(DirectHubTest, can_bridge_create) +{ + std::unique_ptr bridge( + create_gc_to_legacy_can_bridge(hub_.get(), &legacyHub_)); +} + +TEST_F(DirectHubTest, can_bridge_send_recv) +{ + std::unique_ptr bridge( + create_gc_to_legacy_can_bridge(hub_.get(), &legacyHub_)); + + useTrivialSegmenter_ = false; // gridconnect segmenter + fdOne_ = create_port(); + + FdUtils::repeated_write(fdOne_, ":X195B4333N8877665544332211;", 29); + usleep(1000); + wait_for_main_executor(); + EXPECT_EQ(1u, legacyReceiver_.count()); + auto frame = legacyReceiver_.frame(); + EXPECT_TRUE(IS_CAN_FRAME_EFF(frame)); + EXPECT_EQ(0x195b4333u, GET_CAN_FRAME_ID_EFF(frame)); + EXPECT_EQ(8u, frame.can_dlc); + EXPECT_EQ(0x66u, frame.data[2]); + EXPECT_EQ(0x11u, frame.data[7]); + + // Send frame the other way + legacyReceiver_.inject_frame(":X1f555333NF1F2F3F4F5F6F7F8;"); + wait_for_main_executor(); + usleep(1000); + string d = read_some(fdOne_); + EXPECT_EQ(":X1F555333NF1F2F3F4F5F6F7F8;\n", d); + + // Hopefully did not come back. + EXPECT_EQ(1u, legacyReceiver_.count()); +} + +TEST_F(DirectHubTest, can_bridge_send_much) +{ + std::unique_ptr bridge( + create_gc_to_legacy_can_bridge(hub_.get(), &legacyHub_)); + + useTrivialSegmenter_ = false; // gridconnect segmenter + fdOne_ = create_port(); + + write_some(fdOne_); + usleep(1000); + wait_for_main_executor(); + EXPECT_EQ(929u / 29, legacyReceiver_.count()); +} + +TEST_F(DirectHubTest, can_bridge_block) +{ + std::unique_ptr bridge( + create_gc_to_legacy_can_bridge(hub_.get(), &legacyHub_)); + + legacyReceiver_.blockPackets_ = true; + useTrivialSegmenter_ = false; // gridconnect segmenter + fdOne_ = create_port(); + + auto total = write_a_lot(fdOne_); + wait_for_main_executor(); + + LOG(INFO, "total %u pool %u", (unsigned)total, + (unsigned)mainBufferPool->total_size()); + + // value of 100 is 2 (packets) * 1460 (bytes/packet) / 29 (bytes per can + // frame). + EXPECT_EQ(100u, legacyReceiver_.count()); + + // The total bytes buffered should not be very much, despite having tried + // to write a lot of data into the socket. + EXPECT_GT(5000u, total); + // Memory usage is also under control. Not as small as we'd like, because + // every packet has a copy in a CAN buffer. + EXPECT_GT(8000u, mainBufferPool->total_size()); + + // Once unblocked, we get a lot of flow. 
+ legacyReceiver_.clear_blocked(); + legacyReceiver_.blockPackets_ = false; + + total += write_a_lot(fdOne_); + wait_for_main_executor(); + + LOG(INFO, "total %u pool %u", (unsigned)total, + (unsigned)mainBufferPool->total_size()); + + EXPECT_LT(50000u, total); + EXPECT_LT(1000u, legacyReceiver_.count()); +} diff --git a/src/utils/DirectHub.hxx b/src/utils/DirectHub.hxx new file mode 100644 index 000000000..cea41197a --- /dev/null +++ b/src/utils/DirectHub.hxx @@ -0,0 +1,252 @@ +/** \copyright + * Copyright (c) 2020, Balazs Racz + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * \file DirectHub.hxx + * + * Optimized class for ingress and egress of write-once objects, aimed to + * support multi-recipient messages with fast internal fan-out but low compute + * overhead. + * + * @author Balazs Racz + * @date 9 Feb 2020 + */ + +#ifndef _UTILS_DIRECTHUB_HXX_ +#define _UTILS_DIRECTHUB_HXX_ + +#include "executor/Executor.hxx" +#include "utils/DataBuffer.hxx" + +class Service; + +/// Empty class that can be used as a pointer for identifying where a piece of +/// data came from. Used as base class for hub ports. +class HubSource +{ }; + +/// Metadata that is the same about every message (independent of data type). +struct MessageMetadata +{ + /// Clears the message metadata, including notifying the barrier, if set. + void clear() + { + if (done_) + { + done_->notify(); + done_ = nullptr; + } + source_ = dst_ = nullptr; + isFlush_ = false; + } + + /// Sets the done notifiable to a barrier. + /// @param ref a new reference to a barrier notifiable. Will be notified + /// once. + void set_done(BarrierNotifiable *done) + { + if (done_) + { + done_->notify(); + done_ = nullptr; + } + done_ = done; + } + + /// This must be notified when the processing of the message is + /// complete. All forks coming from the message must take children. + BarrierNotifiable *done_ = nullptr; + /// Represents the input port where the message came in. + HubSource *source_ = nullptr; + /// Represents the output port where the message will leave. nullptr means + /// broadcast. + HubSource *dst_ = nullptr; + /// If true, this message should flush the output buffer. + bool isFlush_ = false; +}; + +/// Typed message class. 
Senders to the hub will use this interface to fill in +/// the message details on the hub. +template struct MessageAccessor : public MessageMetadata +{ +public: + void clear() + { + payload_.reset(); + MessageMetadata::clear(); + } + + /// Contains a reference of the actual data. + BufferPtr payload_; +}; + +/// Type specializer for message interface when we are sending untyped data +/// (i.e., byte streams). +template <> struct MessageAccessor : public MessageMetadata +{ + void clear() + { + // Walks the buffer links and unrefs everything we own. + buf_.reset(); + MessageMetadata::clear(); + } + /// Owns a sequence of linked DataBuffers, holds the offset where to start + /// reading in the first one, and how many bytes are total in scope for + /// this message. + LinkedDataBufferPtr buf_; +}; + +/// Abstract base class for segmenting a byte stream typed input into +/// meaningful packet sized chunks. +/// +/// Implementations have to be stateful and are instantiated per specific input +/// port. Implementations have to be thread-compatible. +class MessageSegmenter : public Destructable +{ +public: + /// Makes a segmenting decision given more input data. This function will + /// be called by the input routine repeatedly with the additional payload + /// (non-overlapping) until the function returns a non-zero response. + /// + /// That response tells how many bytes long the last packet was, which must + /// be <= the sum of the size arguments passed in. Thereafter the read flow + /// will call clear() and call segment_message() again with the remaining + /// partial buffer. + /// + /// @param data beginning of the buffer pointing to the next unsegmented + /// data array. + /// @param size how many bytes of data are available at this address. Must + /// be >0. + /// @return 0 if no complete packet was yet seen; positive value N if the + /// packet that starts in the first segment_message call is N bytes + /// long. Negative return are reserved (shall not be returned at this + /// point). + virtual ssize_t segment_message(const void *data, size_t size) = 0; + + /// Resets internal state machine. The next call to segment_message() + /// assumes no previous data present. + virtual void clear() = 0; +}; + +/// Interface for a downstream port of a hub (aka a target to send data to). +template class DirectHubPort : public HubSource +{ +public: + /// Send some data out on this port. The callee is responsible for + /// buffering or enqueueing the data that came in this call. + /// @param msg represents the message that needs to be sent. The callee + /// must not modify the message. + virtual void send(MessageAccessor *msg) = 0; +}; + +/// Interface for a the central part of a hub. +template class DirectHubInterface : public Destructable +{ +public: + /// @return an executor service. + virtual Service *get_service() = 0; + + /// Adds a port to this hub. This port will be receiving all further + /// messages. + /// @param port the downstream port. + virtual void register_port(DirectHubPort *port) = 0; + /// Synchronously removes a port from this hub. This port must have been + /// registered previously. Must not be called on the main executor. + /// @param port the downstream port. + virtual void unregister_port(DirectHubPort *port) = 0; + /// Asynchronously removes a port from this hub. This port must have been + /// registered previously. + /// @param port the downstream port. + /// @param done will be notified when the removal is complete. 
+ virtual void unregister_port(DirectHubPort *port, Notifiable *done) = 0; + + /// Signals that the caller wants to send a message to the hub. When the + /// hub is ready for that, will execute *caller. This might happen inline + /// within this function call, or on a different executor. + /// @param caller callback that actually sends the message. It is required + /// to call do_send() inline. + virtual void enqueue_send(Executable *caller) = 0; + + /// Accessor to fill in the message payload. Must be called only from + /// within the callback as invoked by enqueue_send. + /// @return mutable structure to fill in the message. This structure was + /// cleared by the hub before it is returned here. + virtual MessageAccessor *mutable_message() = 0; + + /// Sends a message to the hub. Before this is called, the message has to + /// be filled in via mutable_message(). + virtual void do_send() = 0; +}; + +typedef DirectHubInterface ByteDirectHubInterface; + +/// Creates a new byte stream typed hub. +ByteDirectHubInterface *create_hub(ExecutorBase *e); + +/// Creates a hub port of byte stream type reading/writing a given fd. This +/// port will be automaticelly deleted upon any error reading/writing the fd +/// (unregistered and memory released). +/// @param hub hub instance on which to register the new port. Onwership +/// retained by caller. +/// @param fd where to read and write data. +/// @param segmenter is an newly allocated object for the given protocol to +/// segment incoming data into messages. Transfers ownership to the function. +/// @param on_error this will be notified if the port closes due to an error. +void create_port_for_fd(ByteDirectHubInterface *hub, int fd, + std::unique_ptr segmenter, + Notifiable *on_error = nullptr); + +/// Creates a new GridConnect listener on a given TCP port. The object is +/// leaked (never destroyed). +/// @param hub incoming and outgoing data will be multiplexed through this hub +/// instance. +/// @param port the TCP port to listen on. +void create_direct_gc_tcp_hub(ByteDirectHubInterface *hub, int port); + +/// Creates a message segmenter for gridconnect data. +/// @return a newly allocated message segmenter that chops gridconnect packets +/// off of a data stream. +MessageSegmenter *create_gc_message_segmenter(); + +/// Creates a message segmenter for arbitrary data. Each buffer is left alone. +/// @return a newly allocated message segmenter. +MessageSegmenter *create_trivial_message_segmenter(); + +// Forward declarations to avoid needing to include Hub.hxx here. +template class GenericHubFlow; +template class HubContainer; +struct CanFrameContainer; +typedef HubContainer CanHubData; +typedef GenericHubFlow CanHubFlow; + +/// Creates a bridge between a gridconnect-based DirectHub and an old style CAN +/// hub flow. +/// @param gc_hub the gridconnect hub. +/// @param can_hub the CAN hub. +/// @return an object that can be deleted (only outside the main executor). +Destructable *create_gc_to_legacy_can_bridge( + DirectHubInterface *gc_hub, CanHubFlow *can_hub); + +#endif // _UTILS_DIRECTHUB_HXX_ diff --git a/src/utils/DirectHub.md b/src/utils/DirectHub.md new file mode 100644 index 000000000..381e881af --- /dev/null +++ b/src/utils/DirectHub.md @@ -0,0 +1,585 @@ +# DirectHub design + +DirectHub is a high performance router component that is suited to do the +forwarding of packets to multiple receivers with minimal CPU and latency +overhead. 
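+
+As a quick orientation before the design discussion, the factory functions
+declared in `DirectHub.hxx` compose roughly as in the sketch below. The
+executor name, port number, and device path are made up for the example; only
+the `create_*` calls and `CanHubFlow` are part of the actual API.
+
+```
+Executor<1> g_executor("directhub_exec", 0, 2048);
+Service g_service(&g_executor);
+
+// GridConnect-typed hub instance.
+std::unique_ptr<ByteDirectHubInterface> hub {create_hub(&g_executor)};
+
+// Listen for GridConnect-TCP clients on a TCP port.
+create_direct_gc_tcp_hub(hub.get(), 12021);
+
+// Attach an already open fd (e.g. a USB-serial CAN adapter), using a
+// GridConnect segmenter to chop the byte stream into packets.
+int device_fd = ::open("/dev/ttyACM0", O_RDWR);
+create_port_for_fd(hub.get(), device_fd,
+    std::unique_ptr<MessageSegmenter>(create_gc_message_segmenter()));
+
+// Optionally bridge into a legacy CanHubFlow so that an IfCan / Node or a
+// HubDeviceSelect-based CAN driver can participate.
+CanHubFlow can_hub(&g_service);
+Destructable *bridge = create_gc_to_legacy_can_bridge(hub.get(), &can_hub);
+```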
+
+It specifically addresses three performance issues with the traditional CanHub
+/ Dispatcher infrastructure:
+
+- DirectHub is zero-copy when forwarding packets between sockets. There is a
+  buffer which is filled with a ::read on the source socket, and then the
+  DirectHub passes around a reference to this buffer all the way to the output
+  object, which then ::write's it to the output socket.
+- CanHub and GcTcpHub as they operate together perform separate GridConnect
+  formatting for every port. When data passes in from one port and out to
+  three others, there would be one parsing and three separate GridConnect
+  rendering calls. DirectHub uses a single GridConnect representation and
+  passes around that representation. Only one parsing is done when a CanIf
+  needs a struct can_frame.
+- DirectHub performs inline calls to the ports when forwarding the packet,
+  while CanHub/GcTcpHub allocates a new copy of the buffer which then gets
+  enqueued separately for each port's queue, on separate StateFlows. This means
+  that the Executor is spinning a lot less for DirectHub, therefore the context
+  switching overhead is much smaller. (note 1)
+
+As future expansion, DirectHub by design will allow routing packets across
+multiple interface types (e.g. CAN, GridConnect and native-TCP), applying
+packet filtering, and performing admission control / fair queueing for
+multiple traffic sources.
+
+_(note 1):_ There is a conceptual problem in `Buffer*` in that it conflates
+two different but equally important characteristics of data flow. A `Buffer`
+is reference-counted, and it can be queued. However, while different owners may
+hold separate references (to the same memory), only one owner is allowed to
+enqueue a `Buffer` into a `Q`, `QList`, or `StateFlowWithQueue`. This is
+because there is only one `QMember` pointer in the `BufferBase`. The result of
+this conflation is that when a `Dispatcher` or a `Hub` / `CanHub` sends the
+same data to multiple different ports or flows, it needs to actually create a
+separate copy for each one of them, and taking a reference is not sufficient.
+
+
+## Theory of operation
+
+
+### Entry flow and threading model
+
+In order to make the router have as little overhead as possible, almost
+everything about the router should be happening inline instead of
+asynchronously / via queueing. Virtual function calls are okay, but
+StateFlowWithQueue operations should be avoided.
+
+Inline calls mean that there is a difference in threading concept: most of the
+time we use the thread of the caller. When concurrent calls are performed, we
+have to hold one of those calls until the other is complete.
+
+Upon an entry call (after the admission controller, see later) we want to first
+check if the router is idle. If yes, we should grab a lock and start processing
+the message inline. If the router is busy, we should queue the incoming
+caller. To allow for both of these, the entry call doesn't actually give us a
+message; instead we get a callback that we'll invoke. The sender renders the
+actual message in that callback.
+
+After processing the message, the router goes back to idle if the queue of held
+callers is found to be empty.
+
+If the queue is non-empty, that means that a different thread called the router
+while we were sending a message on the current thread. We notice this in the
+`on_done()` method of the service. In this case the router remains busy and the
+queue front is taken out for processing.
+The queue front is always an Executable and it will be scheduled on the
+Service's executor (effectively yielding), while the inline caller's thread
+gets released.
+
+A consequence is that the caller's send callback may be called either on the
+caller's thread inline, or on the Service's thread, sometime later after the
+caller signaled the intention of sending something to the DirectHub.
+
+A special case of this threading model is that when the caller runs on the same
+executor as the DirectHub, then the actual send callback is guaranteed to
+happen on that executor. This is the typical case on a single-processor OpenMRN
+application.
+
+### Entry API
+
+The Entry API defines how to send traffic to the DirectHub. It is defined by
+`DirectHubInterface` and `MessageAccessor` in `DirectHub.hxx`.
+
+This is an integrated API that will internally consult the admission controller
+(not implemented, see later). There are three possible outcomes of an entry
+call:
+1. admitted and executed inline
+2. admitted but queued
+3. not admitted, blocked asynchronously. (this doesn't happen today)
+
+When we queue or block the caller, a requirement is to not block the caller's
+thread. This is necessary to allow Executors and StateFlows sending traffic to
+the DirectHub.
+
+When blocked, the best solution is to queue Executables (these are
+queueable). So we put them into a queue, and we put the next one onto the
+executor (yield) whenever we're ready, which is typically when the current
+packet's processing and routing is done.
+
+If we're idle (available) to process the packet upon the entry call, we want to
+run it inline by calling run() on the Executable from the caller's thread.
+
+In other words, assuming the caller is a StateFlow, the inline execution just
+means that we `run()` the executable instead of `notify()`'ing it.
+
+The syntax to prepare for both of these from a calling StateFlow (any
+`StateFlowBase`):
+
+```
+Action have_message() {
+    // Makes the next run() go to fill_request(), but does not put *this onto
+    // the executor.
+    wait_and_call(STATE(fill_request));
+    // Will cause run() to be called now or later.
+    target->enqueue_send(this);
+    // Ensures we do not disturb state_ or the notification.
+    return wait();
+}
+
+Action fill_request() {
+    target->mutable_message()->set_...; // fills message buffer
+    target->do_send();
+    // should not be call_immediately() because threading is not certain at
+    // this point.
+    return yield_and_call(STATE(something_next));
+}
+```
+
+There is a slightly more complicated sequence of states to do if the yield at
+the end is undesired. The actual implementation of the gridconnect / socket
+read flows uses this more complicated mechanism to process multiple
+gridconnect frames that might have come with a single TCP packet.
+
+### Exit API
+
+The Exit API defines how the DirectHub sends traffic to the ports. It is
+defined by `DirectHubPort` and the same `MessageAccessor` in
+`DirectHub.hxx`.
+
+Since we are trying to make as much of the DirectHub processing happen inline,
+the exit API is synchronous. The exit target is responsible for any queueing
+that needs to happen. This is very much like the current FlowInterface<>.
+
+The exit call does not by definition hand the target a ref of the payload. If
+the target needs one, it should take one inline. However, unlike
+`FlowInterface`, this means that the target cannot take the Buffer pointer it
+received and put it into a queue. If it needs to queue, it has to allocate a
+new QMember somewhere.
+See (note 1) in the introduction on a significant difference that this makes.
+
+It is guaranteed that there is at least one ref held during the time of the
+call, and the caller (the hub) will release that ref sometime after the exit
+call has returned.
+
+The exit call gets an accessor instead of a sequence of parameters. The memory
+for the accessor is owned by the hub, and allows the target to inquire the
+necessary parameters. The accessor is assumed to be available only during the
+exit call and after the exit call has returned the accessor will be reused for
+other messages. This is harmonized with the entry API where we are not queueing
+_data_ but we are queueing _data sources_, which then fill in the data when we
+are ready for them to do so.
+
+API:
+
+```
+class DirectHubPort
+{
+    void send(MessageAccessor *message);
+};
+
+class MessageAccessor
+{
+    HubSource *source_;
+    HubSource *dst_;
+    BarrierNotifiable *done_;
+
+    bool isFlush_;
+
+    // For string typed hubs we have a BufferPtr<> data_ with a skip_ and size_
+    // encapsulated in a class:
+    LinkedDataBufferPtr payload_;
+
+    // For arbitrary hubs we have a reference to a buffer:
+    BufferPtr payload_;
+};
+```
+
+An important aspect is that the MessageAccessor is a constant sized object. The
+real payload is always kept as a reference to a Buffer that was allocated by
+the sender object. Output ports are allowed / encouraged to hold on to
+references to this Buffer, which allows the zero-copy operation.
+
+### Runner
+
+The hub has at most one current message at any point in time (zero if the hub
+is idle, one if the hub is busy). This is the message that is being sent by the
+port that was last executed. The MessageAccessor is owned by the runner,
+accessed by the sending port during the entry API to fill in payload and
+message parameters, and passed on to the output ports as part of the exit API.
+There is no queue of messages.
+
+The runner is not a classic StateFlow, because of the lack of this queue. The
+runner only manages the concurrency and queueing of the senders. After the
+designated sender fills in the message in the MessageAccessor, the runner is
+informed that it shall process the packet. This happens without yielding, by an
+inline call to `do_send()` on the `DirectHubInterface`.
+
+Internally, `do_send()` performs the iteration over output ports, calling all
+the exit APIs synchronously. Once this is complete, the message gets cleared,
+which releases the leftover reference owned by the DirectHub. Then the service
+is informed that it may now look for additional callers
+(`DirectHubService::on_done()`) that may have been enqueued. If there are none,
+the hub goes idle. For an inline caller, the control returns to the caller, and
+it may attempt to send another packet. This allows a single caller to send a
+sequence of messages without needing to yield or spin on an executor.
+
+When we give the packet to an output port, that operation should never block
+the router. We should rather block the incoming port than the router. It's the
+job of the incoming admission controller to hold back; in the absence of that
+the limit on the number and byte length of the buffers makes the data source
+hold back.
+
+### Output buffering
+
+For TCP based output ports (both gridconnect-CAN-TCP and native TCP, but not
+gridconnect-USB) we want to ensure that the number of kernel calls is much less
+than the number of GridConnect packets that are being sent.
+This is essential in keeping the costs low, especially on the CC32xx platform
+where each kernel call turns effectively into a packet to be sent to the
+network.
+
+The DirectHub gets one call and one iteration for each GridConnect packet.
+
+The mechanism that the legacy HubDevice infrastructure used is to create a
+BufferPort, which internally reassembles these packets into larger buffers
+whenever they come within a certain period of time. This results in data copies
+unfortunately.
+
+The DirectHub infrastructure approaches this differently. Instead of
+copying the input data into a separate buffer, it attempts to recognize when
+the input data came from the same source and used consecutive bytes of the same
+buffer. This is accomplished by comparing the Buffer references and offset/size
+values of consecutive calls (see `LinkedDataBufferPtr::try_append_from()` in
+`DataBuffer.hxx`). When two packets came from consecutive bytes of a single
+input buffer, then the two references are united into a single reference with a
+longer size. So long as the calls of the DirectHub are without yield, this
+works until the entire input buffer is reassembled into a single output buffer,
+which will then be written with a single `::write()` call to the socket.
+
+While this mechanism is rather limited, it solves the high-throughput
+problem: when an input client is sending a datagram or stream with a large
+number of CAN frames, a single 1460-byte read succeeds from the input socket,
+then a sequence of sends happen through the DirectHub without yielding. On the
+output there will be one write for almost all of the data, except a partial
+GridConnect packet which had to be held until the next read.
+
+Since the output object keeps the reference to the input buffer, the input
+port's read flow will not observe the memory released until the output write
+has completed. Since the input port has a limited number of such buffers, this
+creates effective back-pressure, preventing the input port from reading too
+much data into memory.
+
+### Message representation for untyped data in transit
+
+See (note 1) in the introduction for background about the difference between
+reference counted objects and queueable objects (QMembers). Specifically, it is
+important to separate the queuing concept from the shared ownership of the data
+payload. This is because the same data payload might be owned by multiple
+output ports, but if the queue next pointer is tied to the buffer, then we
+cannot effectively share.
+
+Generally, all data during transit is represented in BufferPtr. This is a
+reference to an input buffer, but is not queueable. DirectHub does not use data
+queues internally, so that's OK.
+
+For untyped / string data, we need to ensure that we keep the length of the
+contents as well. However, we don't generally know the length of the contents
+until the read has happened and we have processed the incoming data on the
+source port.
+
+To avoid having to copy data, we perform a single longer read into a large
+buffer (typically 1460 bytes, the size of a TCP frame), then we segment this
+into individual messages. Each such message will have a reference to the longer
+buffer, and an offset and a length attribute (called skip_ and size_).
+
+A particular case to be handled is when one message reaches beyond the
+end of one such buffer and into the beginning of the next buffer. It could also
+happen that a message is longer than 1460 bytes.
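+
+The DirectHub unit tests exercise exactly this case; a condensed sketch of the
+relevant part is below (the 64-byte pool and the offsets are the test's own
+choices, and the `next_` link it uses is explained in the next paragraph):
+
+```
+DataBuffer *buf1;
+DataBuffer *buf2;
+pool_64.alloc(&buf1);
+pool_64.alloc(&buf2);
+buf1->set_size(13);   // only 13 bytes of buf1 are valid payload
+unsigned skip = 10;   // the message starts at offset 10 ("skip_")
+memcpy(buf1->data() + skip, "abc", 3);
+buf1->set_next(buf2); // the payload continues in the next buffer
+memcpy(buf2->data(), "de", 2);
+
+// A single 5-byte message ("abcde") that starts at offset 10 of buf1 and
+// spills over into buf2:
+LinkedDataBufferPtr msg;
+msg.reset(buf1, skip, 5);
+```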
+
+For this purpose we keep `BufferBase` objects linked to each other using the
+`next_` pointers. The queue created by the `next_` pointers means that the data
+payload continues in the next buffer. This is different from the
+`StateFlowWithQueue` infrastructure, and generally the `Q` and `QList` classes,
+where the `next_` pointer means that there is another data item (a different
+message) waiting to be processed by the same StateFlow later.
+
+The implementation of this mechanism is in `LinkedDataBufferPtr` in
+`utils/DataBuffer.hxx`.
+
+Some earlier notes:
+
+BufferBase has the ability to do refcounting and shared ownership. It is
+possible to have a BufferBase that has untyped payload (i.e., just
+bytes). However, the BufferBase needs to know the amount of bytes as input; we
+cannot trim down the actual bytes read from the BufferBase's size field, or
+else we lose memory because after freeing the buffer will not be returned to
+the right size. An alternative possibility is to have a buffer pool that
+generates a single size buffer so everything is returned to the same
+queue. Then size can be adjusted to the exact number of bytes read. This might
+utilize a proxy buffer pool that takes buffers of a given size from the main
+buffer pool and then returns them there upon freeing, resetting the size to
+have them land in the appropriate bucket.
+
+As an alternative, `shared_ptr` is a standard template library solution
+to the same problem. However, `shared_ptr` causes two memory
+allocations for payloads that are longer than 16 bytes, and it has a minimum of
+36 bytes length (+payload length+4 if more than 16 bytes).
+
+Note that the input message could be split between two shared buffer
+ownerships. This means that the queue entry needs two buffer pointers, an
+offset, and a length. We could use the buffer base next pointers to string up
+buffers that have data from the same message, even if it's more than two. This
+way we only need one buffer pointer. We have to assume that the respective
+bytes always go together.
+
+It might make sense to support appending another message to the end of the
+buffers. This would be especially true if the last buffer pointer is just
+partially used up, and thus the bufferptr at the end of the string of
+buffers is the same as the incoming next buffer.
+
+### Input segmentation
+
+When data arrives from the socket to be read, we will allocate a shareable
+buffer, then execute the asynchronous read. As the read completes, the input
+data will be passed on to the segmenter. The goal of the segmenter is to find
+the boundary of the message, for gridconnect the `: ... ;` delimiter, and on
+native OpenLCB-TCP the binary length of the message. Then the message can be
+passed on to routing.
+
+It is possible that during segmentation we start with one ref of a buffer, and
+output two independent refs of the same buffer. This happens if a single kernel
+read ends up with more than one message, which is rather typical in
+GridConnect-TCP, but possibly even in GridConnect-USB.
+
+It is also possible that the segmenter will retain a ref of the last read
+buffer, waiting for the completion of the message that is present therein.
+
+We must keep reading bytes from the hardware until the segmenter is happy to
+send at least one packet onwards. Only thereafter should we send the packet (or
+consult the admission controller).
+It is essential that a partial packet never be sent to the hub, because it is
+not guaranteed that we get the completion of that packet before another port
+might try to send a different packet. We cannot interleave data from different
+packets; that would produce unparseable output.
+
+Implementation note:
+
+There are several things that have to happen on the ingress port, and the order
+in which we do these matters:
+
+- allocate a BarrierNotifiable* for accompanying the buffer.
+- allocate a (1460-byte) buffer for the `::read` call.
+- perform the `::read`
+- call the segmenter (which might result in additional buffers needed and
+  additional `::read` calls to be made)
+- (not implemented yet) consult the admission controller on whether we are
+  allowed to send.
+- send the message to the hub.
+
+The above list is the current order. There is one suboptimal part, which is
+that we allocate a buffer earlier than when we know that there is data to read
+from the fd or socket. We could theoretically wait until the fd is selectable
+for read, and only then perform the buffer allocation. With the admission
+controller this will get even more complicated.
+
+### Legacy connection
+
+We have two reasons to interact with a legacy `CanHub`:
+- Running an OpenMRN `IfCan` and a `Node` requires this object to communicate
+  with the external world.
+- Interacting with a hardware CAN controller via SocketCan or via OpenMRN
+  native CAN controller drivers can be done via `struct can_frame` today, and
+  the implementation is in `HubDeviceSelect`.
+
+To support these use-cases, there is a legacy bridge, which connects a
+GridConnect typed `DirectHub` to a `CanHub`. It takes care of parsing the
+GridConnect messages in one direction, formatting them in the other direction,
+and bridges the differences between the APIs.
+
+When many CAN frames are generated consecutively, they typically get rendered
+into a single text buffer. However, they don't typically get sent off without
+a yield in between.
+
+## Future features
+
+**WARNING** These features are not currently implemented. They are described
+here with requirements to guide a future implementation.
+
+### Admission controller (not yet implemented)
+
+When a caller has a packet to send, it goes first through an admission
+controller. The admission controller is specific to the source port. If the
+admission controller rejects the packet, we hold the caller, and save the
+notification the caller gave us to let the caller know when they can
+proceed.
+
+QQ is it possible that a single source has more than one packet to send us,
+possibly at different priorities, and the admission controller would block
+one and then allow a different one? Probably yes; a priority based traffic
+selector behaves like this. We must ensure that traffic of the same priority
+and src/dst pair is always queued and never independently presented to the
+admission controller.
+
+Failed requests of the admission controller have to line up in a queue like
+the Async Allocator. When there is a token available, the queue front
+should be woken up / notified. This gives a constraint on what the class
+interface of the incoming request should be: an Executable has an
+alloc_result() call.
+
+The most straightforward implementation of an admission controller is to
+keep track of the number of inflight objects. This automatically pushes
+back on the src-dst pairs that generate too high traffic even in the face
+of smart routing.
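+
+A purely illustrative sketch of such a count-based controller is below. None of
+these class or method names exist in the code today; the sketch only shows the
+intended token-counting behaviour, using the `alloc_result()` wakeup mentioned
+above:
+
+```
+// Hypothetical; not part of DirectHub today.
+class CountingAdmissionController
+{
+public:
+    /// @return true if the caller may send now; otherwise the caller is
+    /// queued and will be woken up once a token frees up.
+    bool try_admit(Executable *caller)
+    {
+        AtomicHolder h(&lock_);
+        if (inflight_ < limit_)
+        {
+            ++inflight_;
+            return true;
+        }
+        waiting_.push_back(caller);
+        return false;
+    }
+
+    /// Called when a previously admitted message has fully left the system.
+    void release()
+    {
+        Executable *next = nullptr;
+        {
+            AtomicHolder h(&lock_);
+            if (waiting_.empty())
+            {
+                --inflight_;
+                return;
+            }
+            // Hand the freed token directly to the longest waiting caller;
+            // inflight_ stays the same because the token changes owner.
+            next = waiting_.front();
+            waiting_.pop_front();
+        }
+        // A real implementation would pass the granted token object here.
+        next->alloc_result(nullptr);
+    }
+
+private:
+    Atomic lock_;
+    unsigned inflight_ = 0;
+    unsigned limit_ = 2;
+    std::deque<Executable *> waiting_;
+};
+```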
+
+There is also no admission control for CAN hardware, because we do not have the
+ability to push back on traffic.
+
+When admission control has woken up an input stream, that does not mean the
+stream has the ability to execute directly on the hub. The hub might be busy.
+
+QQ how long do we need to keep around the existence of the pending object?
+Ideally exactly until the pending object fully leaves the memory of the
+device. If there is an outgoing assembly buffer that collects bytes of outgoing
+data, we must ensure that this outgoing buffer is also kept in
+consideration. However, so long as there is empty space in this buffer we
+should not prevent further ingress. Once there is no empty space in the output
+buffer, we have to limit the number of items that queue up to be >= the maximum
+single-source input entries. This will cause pushback on the ingress path. This
+means that after the buffer is complete, we still have to queue some packets.
+
+**Current State:** Since the admission controller is not implemented, each call
+to the DirectHub will be enqueued on a first-come-first-served basis. One call
+will be one GridConnect packet. A call to the hub never blocks; calls are
+enqueued only if they are concurrent from different threads, which doesn't
+typically happen when there is one main executor. One source port will perform
+as many calls as it can from a single buffer -- until the segmenter says the
+message in the buffer is partial. This is typically 1460 bytes
+(`config_directhub_port_incoming_buffer_size()`). After that the port will
+attempt to allocate a new buffer, which will make it pause. Each port can have
+at most 2 buffers in flight
+(`config_directhub_port_max_incoming_packets()`). At this point another port
+can perform writes to the hub. When multiple ports are sending their traffic
+concurrently, on the output we'll have roughly 1.5 kbytes from one port, then
+1.5 kbyte from another, etc. If only one port is sending a lot of traffic, and
+another wants to send just one packet, then typically 3 kbytes of traffic has
+to drain from the one port before the other can send its packet. Since nothing
+queues at the source port, it is not possible for the stack to perform
+prioritization of the packets against each other, for example when one source
+port is sending a stream, while another sends a CAN control frame or an event.
+
+### Connecting DirectHubs with each other (not yet implemented)
+
+It is pretty important to have the Exit API compatible with the Entry API in
+case we have a bridge that connects two different Hubs with a data conversion
+in between.
+However, there is no admission control between different Hubs.
+
+For hubs that are 1:1 in their representation of messages, a synchronous
+execution is desirable. This might be held up if the specific flow is busy
+though. There can be a race condition where inputs from two different but
+bidirectionally interconnected hubs are both receiving inbound packets, both
+busy, and have to queue for accessing each other -- a deadlock.
+
+Option 1) to work around this is to have 1:1 connected hubs not have
+independent free/busy states, but share that state. This way all enqueueing
+happens on the entry point where separately defined flows or executors ensure
+queueing is possible.
+
+Option 2) to work around this is to decouple the exit from the original flow if
+we fail to acquire the target hub; this could be implemented by creating a
+CallbackExecutable on the heap and handing out ownership of the buffer to it.
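+
+A rough sketch of option 2, reusing the `CallbackExecutable` pattern that the
+unit tests use for sending. The bridge members are hypothetical, and
+`take_payload_ref()` is a placeholder for whatever mechanism keeps the payload
+alive; none of this exists in the code today:
+
+```
+// Inside a hypothetical bridge port between two hubs:
+void send(MessageAccessor<uint8_t[]> *msg) override
+{
+    // Keep the payload alive beyond this synchronous call (placeholder).
+    DataBuffer *head;
+    unsigned skip, size;
+    take_payload_ref(msg, &head, &skip, &size);
+    BarrierNotifiable *done = msg->done_ ? msg->done_->new_child() : nullptr;
+
+    // Decouple: the target hub runs this callback whenever it becomes free,
+    // so the sending hub's executor is never blocked on the target hub.
+    otherHub_->enqueue_send(new CallbackExecutable(
+        [this, head, skip, size, done]() {
+            auto *m = otherHub_->mutable_message();
+            m->buf_.reset(head, skip, size); // consumes the reference we took
+            m->source_ = this;
+            m->done_ = done;
+            otherHub_->do_send();
+        }));
+}
+```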
+
+The conclusion from both of these is that the exit API is synchronous. The exit
+target is responsible for any queueing that needs to happen. This is very much
+like the current FlowInterface<>.
+
+### Routing (not yet implemented)
+
+The first thing the runner flow should determine is if we have a broadcast or a
+unicast packet. For a unicast packet we will have to look up directly the
+output port that it should go to. For a broadcast packet we have to iterate.
+The output port might have to go through an indirection: some output ports are
+going to be represented by a bridge (e.g. CAN to TCP bridge) first and another
+routing hub on the far side of it.
+
+### Alternate output buffering (to be decided)
+
+See background in section Output buffering.
+
+We have two additional alternatives on how to handle this output buffer:
+
+1) have a fixed memory pre-allocated, and as packets arrive at the output
+port, we copy the bytes into this memory.
+
+2) We queue up the packets in the output port with their original buffer,
+keeping a ref to those buffers, and when we decide to flush, we allocate
+the output buffer, copy the bytes into it, send off the data, then free the
+output buffer.
+
+Option 2 uses less memory because the half empty output buffer is not kept
+around. Both options have the same number of memory copies (we cannot avoid
+the extra copy unless we have some scatter-gather DMA thing). Option 1 is
+easier for understanding when the original source admission controller can
+release the token. Option 2 allows high priority packets to jump to the
+front of the queue just before the flush is happening. Option 1 takes less
+memory in that broadcast packets do not need to be queued at each single
+output port.
+
+QQ. How does the admission controller keep track of when the message really
+exited the system? We must keep in mind that the message might have been
+translated to a different physical format, and the memory for the specific
+byte buffer be freed.
+
+### Flushing (not implemented yet)
+
+There should be a "flush" property of a message, which should cause
+outgoing buffers to be immediately transmitted in order to achieve lower
+roundtrip latency of important requests. We can be smart about what to
+flush:
+- last segment of datagrams (:X1Axxx, :X1Dxxx)
+- last segments of stream writes (:X1F that has less than 8 data bytes)
+- datagram ACK and stream proceed messages
+- loco function control messages
+- emergency stop messages
+
+Current state: the isFlush_ attribute is there, but it is not filled in by the
+traffic sources and not used in the sinks. Since the sinks don't hold data with
+an output buffer / timer at this time, there is nothing to do really with this
+information.
+
+### Bridges (not yet implemented)
+
+In a real CAN-TCP router we will need to have three separate instances of
+this router type: one for TCP messages, one for CAN frames, one for
+GridConnect text.
+
+The respective instances have to be connected by bridges. A bridge is an
+output port from one router and an input port to another router; it allocates
+memory for the data type conversion, but does not perform admission control
+(this is because admission control is end to end). The bridge might be a
+state flow, because for ->CAN messages the bridge has to perform alias
+lookups or allocation.
+
+It is unclear whether we also need routers / bridges for internal-only
+references of messages (e.g. Buffer).
Is the +AddressedMessageWriteFlow a component on the If side or is it a component +that is part of the connecting bridge? It has to exist in the bridge for +the purpose of routing messages between different types of Ifs. + +### Route lookups (not yet implemented) + +When a message shows up in a router, we have to perform a lookup to decide +where it goes. The output of this lookup is either "everywhere" (broadcast +message), which means every output except the same as where it came +from. The other alternative is "to a specific target" which is then one +single port. That port might not be on the current router though. Thus we +need a secondary routing table as well, which maps HubSources to ports or +bridges in the current, type-specific router. This secondary routing table +exists independently for each router; however, the primary routing table +only exists once globally. + +In every case we will test the packet just before output against the +specific HubInput that it came from in order to avoid echoing it back. The +HubInput may have to be changed between specific routers; or alternatively +we could use the secondary lookup table for the current router. It is +sufficient to perform that secondary lookup only once, as the current +router starts to process the message, if it has been identified that it is +a broadcast message. This basically means that the skipMember (source) can +be a class variable inside the hub flow itself. + +The bridge could actually also directly represent the skipMember when +sending the packet to the target router. However, we want to make sure the +enqueueing of the message does not cause a loss of information. We could +have the enqueueing be based on an execution of a piece of code. diff --git a/src/utils/DirectHubGc.cxx b/src/utils/DirectHubGc.cxx new file mode 100644 index 000000000..b6582f2fa --- /dev/null +++ b/src/utils/DirectHubGc.cxx @@ -0,0 +1,148 @@ +/** \copyright + * Copyright (c) 2020, Balazs Racz + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * \file DirectHubGc.cxx + * + * GridConnect support for DirectHub. 
+ * + * @author Balazs Racz + * @date 1 Mar 2020 + */ + +#include "utils/DirectHub.hxx" + +/// Message segmenter that chops incoming byte stream into gridconnect packets. +class DirectHubGcSegmenter : public MessageSegmenter +{ +public: + DirectHubGcSegmenter() + { + clear(); + } + + ssize_t segment_message(const void *d, size_t size) override + { + if (!size) + { + return 0; // nothing to do. + } + const char *data = static_cast(d); + if (packetLen_ == 0) + { + // beginning of packet. + isGcPacket_ = data[0] == ':'; + } + size_t ofs = 0; + if (isGcPacket_) + { + // looking for terminating ; + while ((ofs < size) && (data[ofs] != ';')) + { + ++ofs; + } + if (ofs < size) + { + // found the terminating ; + ++ofs; + // append any garbage we still have. + while ((ofs < size) && (data[ofs] != ':')) + { + ++ofs; + } + packetLen_ += ofs; + return packetLen_; + } + else + { + // ran out of payload without finding terminating ; + packetLen_ += size; + return 0; + } + } + else + { + // Looking for starting ':' + while ((ofs < size) && (data[ofs] != ':')) + { + ++ofs; + } + packetLen_ += ofs; + if (ofs < size) + { + // found it + return packetLen_; + } + else + { + return 0; + } + } + } + + /// Resets internal state machine. The next call to segment_message() + /// assumes no previous data present. + void clear() override + { + isGcPacket_ = false; + packetLen_ = 0; + } + +private: + /// True if the current packet is a gridconnect packet; false if it is + /// garbage. + uint32_t isGcPacket_ : 1; + + /// How many bytes long this packet is. + uint32_t packetLen_ : 30; +}; + +MessageSegmenter *create_gc_message_segmenter() +{ + return new DirectHubGcSegmenter(); +} + +/// Message segmenter that keeps each packet as-is. +class DirectHubTrivialSegmenter : public MessageSegmenter +{ +public: + ssize_t segment_message(const void *d, size_t size) override + { + total_ += size; + LOG(VERBOSE, "segment %zu total %zu", size, total_); + return size; + } + + void clear() override + { + } + + size_t total_ {0}; +}; + +MessageSegmenter *create_trivial_message_segmenter() +{ + return new DirectHubTrivialSegmenter(); +} diff --git a/src/utils/DirectHubGc.cxxtest b/src/utils/DirectHubGc.cxxtest new file mode 100644 index 000000000..1a77931a7 --- /dev/null +++ b/src/utils/DirectHubGc.cxxtest @@ -0,0 +1,97 @@ +#include "utils/DirectHub.hxx" + +#include "utils/test_main.hxx" + +class GridConnectSegmenterTest : public ::testing::Test +{ +protected: + ssize_t send_some_data(const string &payload) + { + return segmenter_->segment_message(payload.data(), payload.size()); + } + + void clear() + { + segmenter_->clear(); + } + + std::unique_ptr segmenter_ { + create_gc_message_segmenter()}; +}; + +TEST_F(GridConnectSegmenterTest, create) +{ +} + +TEST_F(GridConnectSegmenterTest, single_message) +{ + auto len = send_some_data(":X1;"); + EXPECT_EQ(4, len); + clear(); + + // with two characters of trailing garbage + len = send_some_data(":X1; \n"); + EXPECT_EQ(6, len); + clear(); +} + +TEST_F(GridConnectSegmenterTest, split_message) +{ + auto len = send_some_data(":X1"); + EXPECT_EQ(0, len); + + len = send_some_data("2;"); + EXPECT_EQ(5, len); + clear(); + + // with one character of trailing garbage + len = send_some_data(":X1"); + EXPECT_EQ(0, len); + + len = send_some_data("2;\n"); + EXPECT_EQ(6, len); + clear(); +} + +TEST_F(GridConnectSegmenterTest, many_split_message) +{ + auto len = send_some_data(":X1"); + EXPECT_EQ(0, len); + + EXPECT_EQ(0, send_some_data("95B")); + EXPECT_EQ(0, send_some_data("4123N")); + EXPECT_EQ(0, 
send_some_data("01020304")); + EXPECT_EQ(0, send_some_data("0506")); + EXPECT_EQ(0, send_some_data("07")); + EXPECT_EQ(28, send_some_data("08;")); +} + +TEST_F(GridConnectSegmenterTest, two_messages) +{ + auto len = send_some_data(":X12;:X345;"); + EXPECT_EQ(5, len); + clear(); + len = send_some_data(":X345;"); + EXPECT_EQ(6, len); + clear(); +} + +TEST_F(GridConnectSegmenterTest, two_messages_with_break) +{ + auto len = send_some_data(":X12;\n:X35;"); + EXPECT_EQ(6, len); + clear(); + len = send_some_data(":X35;"); + EXPECT_EQ(5, len); + clear(); +} + +TEST_F(GridConnectSegmenterTest, leading_garbage) +{ + auto len = send_some_data(" :X12;\n"); + EXPECT_EQ(3, len); + clear(); + len = send_some_data(":X12;\n"); + EXPECT_EQ(6, len); + clear(); +} diff --git a/src/utils/DirectHubLegacy.cxx b/src/utils/DirectHubLegacy.cxx new file mode 100644 index 000000000..3dec84c55 --- /dev/null +++ b/src/utils/DirectHubLegacy.cxx @@ -0,0 +1,187 @@ +/** \copyright + * Copyright (c) 2020, Balazs Racz + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * \file DirectHubLegacy.cxx + * + * Connection from a DirectHub to a legacy hub. + * + * @author Balazs Racz + * @date 5 Mar 2020 + */ + +#include "utils/DirectHub.hxx" +#include "utils/Hub.hxx" +#include "utils/gc_format.h" + +extern DataBufferPool g_direct_hub_kbyte_pool; + +/// Bridge component that converts the outgoing CAN packets into gridconnect +/// format and enqueues them into the DirectHub for sending. +class HubToGcPort : public CanHubPort, public DirectHubPort +{ +public: + HubToGcPort(DirectHubInterface *gc_hub, CanHubFlow *can_hub) + : CanHubPort(gc_hub->get_service()) + , targetHub_(gc_hub) + , sourceHub_(can_hub) + { + sourceHub_->register_port(this); + targetHub_->register_port(this); + } + + ~HubToGcPort() + { + sourceHub_->unregister_port(this); + targetHub_->unregister_port(this); + } + + /// Handles the next CAN packet that we need to send. + Action entry() override + { + // Allocates output buffer if needed. + if (buf_.free() < MIN_GC_FREE) + { + // Need more output buffer. + DataBuffer *b; + g_direct_hub_kbyte_pool.alloc(&b); + buf_.append_empty_buffer(b); + } + // Generates gridconnect message and commits to buffer. 
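+        // A worst-case extended frame renders to 29 bytes: ":X" + 8 ID
+        // digits + "N" + 16 data digits + ";" + an optional trailing
+        // newline, which is what MIN_GC_FREE below accounts for.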
+ char *start = (char *)buf_.data_write_pointer(); + char *end = gc_format_generate(message()->data(), start, 0); + packetSize_ = end - start; + buf_.data_write_advance(packetSize_); + pktDone_ = message()->new_child(); + release(); + // Sends off output message. + wait_and_call(STATE(do_send)); + inlineRun_ = true; + inlineComplete_ = false; + targetHub_->enqueue_send(this); + inlineRun_ = false; + if (inlineComplete_) + { + return exit(); + } + else + { + return wait(); + } + } + + /// Handles the callback from the direct hub when it is ready for us to + /// send the message. + Action do_send() + { + auto *m = targetHub_->mutable_message(); + m->buf_ = buf_.transfer_head(packetSize_); + m->source_ = (DirectHubPort *)this; + m->done_ = pktDone_; + targetHub_->do_send(); + if (inlineRun_) + { + inlineComplete_ = true; + return wait(); + } + else + { + return exit(); + } + } + + /// GC to binary path. Called by the DirectHub with a text packet or a + /// garbage packet. + void send(MessageAccessor *msg) override + { + auto &buf = msg->buf_; + uint8_t *p; + unsigned available; + buf.head()->get_read_pointer(buf.skip(), &p, &available); + if (buf.size() == 0 || *p != ':') + { + // Not a gridconnect packet. Do not do anything. + return; + } + Buffer *can_buf = sourceHub_->alloc(); + if (msg->done_) + { + can_buf->set_done(msg->done_->new_child()); + } + can_buf->data()->skipMember_ = (CanHubPort *)this; + const char *text_packet = nullptr; + string assembled_packet; + if (available == buf.size()) + { + // One block of data. Convert in place. + text_packet = (const char *)p; + } + else + { + buf.append_to(&assembled_packet); + text_packet = assembled_packet.c_str(); + } + if (gc_format_parse(text_packet, can_buf->data()) < 0) + { + string debug(text_packet, buf.size()); + LOG(INFO, "Failed to parse gridconnect packet: '%s'", + debug.c_str()); + can_buf->unref(); + return; + } + /// @todo consider if we need to set the priority here. + sourceHub_->send(can_buf, 0); + } + +private: + /// Output buffer of gridconnect bytes that will be sent to the GC + /// DirectHub. + LinkedDataBufferPtr buf_; + /// Where to send the target data. + DirectHubInterface *targetHub_; + /// Done notifiable from the source packet. + BarrierNotifiable *pktDone_ = nullptr; + /// Hub where we get the input data from (registered). + CanHubFlow *sourceHub_; + /// The source pointer we need to use for sending messages to the target + /// hub. + HubSource *me_; + /// True while we are calling the target hub send method. + bool inlineRun_ : 1; + /// True if the send completed inline. + bool inlineComplete_ : 1; + /// Number of bytes this gridconnect packet is. + uint16_t packetSize_; + /// Minimum amount of free bytes in the current send buffer in order to use + /// it for gridconnect rendering. + static constexpr unsigned MIN_GC_FREE = 29; +}; + +Destructable *create_gc_to_legacy_can_bridge( + DirectHubInterface *gc_hub, CanHubFlow *can_hub) +{ + + return new HubToGcPort(gc_hub, can_hub); +} diff --git a/src/utils/HubDeviceSelect.hxx b/src/utils/HubDeviceSelect.hxx index f913c6b4d..9080297c8 100644 --- a/src/utils/HubDeviceSelect.hxx +++ b/src/utils/HubDeviceSelect.hxx @@ -187,7 +187,8 @@ public: } } - /// Unregisters the current flow from the hub. + /// Unregisters the current flow from the hub. Must be called on the main + /// executor. 
void shutdown() { auto *e = this->service()->executor(); diff --git a/src/utils/Queue.hxx b/src/utils/Queue.hxx index bf7819c5d..fd7ac9317 100644 --- a/src/utils/Queue.hxx +++ b/src/utils/Queue.hxx @@ -343,6 +343,15 @@ public: Result next() { AtomicHolder h(impl_.lock()); + return next_locked(); + } + + /** Get an item from the front of the queue. Caller must hold lock(). + * @return @ref Result structure with item retrieved from queue, NULL if + * no item available + */ + Result next_locked() + { return waiting ? Result() : impl_.next(); } diff --git a/src/utils/RingBuffer.hxx b/src/utils/RingBuffer.hxx index 7240971ba..a253f3d4d 100644 --- a/src/utils/RingBuffer.hxx +++ b/src/utils/RingBuffer.hxx @@ -37,7 +37,7 @@ #include #include "utils/macros.h" -/** Implements a vanilla ring buffer. +/** Implements a vanilla ring buffer. Not thread safe. */ template class RingBuffer { @@ -110,7 +110,90 @@ public: count -= removed; return removed; } - + + /** Get a reference to the current location in the buffer for read. + * @param buf location to store resulting reference + * @return number of items in contiguous memory. May be less than total + * number of items in the buffer. + */ + size_t data_read_pointer(T **buf) + { + size_t result = size - readIndex; + if (count < result) + { + result = count; + } + *buf = data + readIndex; + return result; + } + + /** Get a reference to the current location in the buffer for write. + * @param buf location to store resulting reference + * @return amount of space in contiguous memory. May be less than total + * amount of space avaiable. + */ + size_t data_write_pointer(T **buf) + { + size_t result = size - writeIndex; + if (space() < result) + { + result = space(); + } + *buf = data + writeIndex; + return result; + } + + /** Remove a number of items from the buffer by advancing the readIndex. + * @param items total number of items to remove + * @return total number of items removed + */ + size_t consume(size_t items) + { + if (items > count) + { + items = count; + } + size_t consumed = items; + count -= items; + if ((readIndex + items) >= size) + { + items -= (size - readIndex); + readIndex = 0; + } + readIndex += items; + + // Try to align the buffer so that we have the most available space to + // write. + if (!count) + { + readIndex = writeIndex = 0; + } + + return consumed; + } + + /** Add a number of items to the buffer by advancing the writeIndex. + * @param items total number of items to add + * @return total number of items added + */ + size_t advance(size_t items) + { + if (items > space()) + { + items = space(); + } + size_t added = items; + count += items; + if ((writeIndex + items) >= size) + { + items -= (size - writeIndex); + writeIndex = 0; + } + writeIndex += items; + + return added; + } + /** Number of items in the buffer. * @return number of items in the buffer */ diff --git a/src/utils/constants.cxx b/src/utils/constants.cxx index a6553a47c..de57b39d3 100644 --- a/src/utils/constants.cxx +++ b/src/utils/constants.cxx @@ -159,6 +159,12 @@ DEFAULT_CONST(gridconnect_tcp_notsent_lowat_buffer_size, 1); DEFAULT_CONST_FALSE(gridconnect_tcp_use_select); +// By default read a full TCP packet from the input port in one go. +DEFAULT_CONST(directhub_port_incoming_buffer_size, 1460); +// how many 1460-byte packets per port we parse before waiting for output to +// drain. +DEFAULT_CONST(directhub_port_max_incoming_packets, 2); + #ifdef ESP_PLATFORM /// Use a stack size of 3kb for SocketListener tasks. 
DEFAULT_CONST(socket_listener_stack_size, 3072); diff --git a/src/utils/constants.hxx b/src/utils/constants.hxx index 19121567f..07b46ad8c 100644 --- a/src/utils/constants.hxx +++ b/src/utils/constants.hxx @@ -95,7 +95,36 @@ const int _sym_##name = value; \ EXTERNCEND -#else // native C +#ifdef GTEST + +/// Use this macro at the top of a .cxxtest file to allow overriding constant +/// values in that test. +/// +/// @param name the constant name +/// @param start_value an integer; this value will be used for each test if no +/// specific value is given. It is ideal if this matches what is as +/// DEFAULT_CONST in constants.cxx, but there is no mechanism to keep them in +/// sync. +#define TEST_CONST(name, start_value) \ + EXTERNC extern const int _sym_##name; \ + const int __attribute__((section(".data"))) _sym_##name = start_value; \ + EXTERNCEND \ + int *config##name##override() \ + { \ + return (int *)&_sym_##name; \ + } + +/// Call this macro at the beginning of a test function, or inside a test +/// fixture class to set the constant's value for that test. +/// @param name the name of the constant +/// @param new_value an integer, the constant will be overridden to this value +/// during the given scope. +#define TEST_OVERRIDE_CONST(name, new_value) \ + ScopedOverride ov##name{config##name##override(), new_value} + +#endif // GTEST + +#else // not simple const, but rather use direct asm / linking statements #define DECLARE_CONST(name) \ EXTERNC extern char _sym_##name; \ diff --git a/src/utils/gc_format.cxx b/src/utils/gc_format.cxx index 6d6a61aaa..6fcce47f8 100644 --- a/src/utils/gc_format.cxx +++ b/src/utils/gc_format.cxx @@ -247,7 +247,8 @@ char* gc_format_generate(const struct can_frame* can_frame, char* buf, int doubl output(buf, nibble_to_ascii(can_frame->data[offset] & 0xf)); } output(buf, ';'); - if (config_gc_generate_newlines() == CONSTANT_TRUE) { + if (config_gc_generate_newlines() == CONSTANT_TRUE) + { output(buf, '\n'); } return buf; diff --git a/src/utils/sources b/src/utils/sources index f24fdbf50..93175a4c1 100644 --- a/src/utils/sources +++ b/src/utils/sources @@ -4,35 +4,38 @@ CSRCS += errno_exit.c \ ieeehalfprecision.c CXXSRCS += \ - Base64.cxx \ - Blinker.cxx \ - CanIf.cxx \ - ClientConnection.cxx \ - Crc.cxx \ - StringPrintf.cxx \ - Buffer.cxx \ - ConfigUpdateListener.cxx \ - FdUtils.cxx \ - FileUtils.cxx \ - ForwardAllocator.cxx \ - GcStreamParser.cxx \ - GcTcpHub.cxx \ - GridConnect.cxx \ - GridConnectHub.cxx \ - HubDevice.cxx \ - HubDeviceSelect.cxx \ - JSHubPort.cxx \ - Queue.cxx \ - ReflashBootloader.cxx \ - ServiceLocator.cxx \ - Stats.cxx \ - SocketCan.cxx \ - SocketClient.cxx \ - constants.cxx \ - format_utils.cxx \ - gc_format.cxx \ - logging.cxx \ - socket_listener.cxx \ + Base64.cxx \ + Blinker.cxx \ + Buffer.cxx \ + CanIf.cxx \ + ClientConnection.cxx \ + ConfigUpdateListener.cxx \ + Crc.cxx \ + DirectHub.cxx \ + DirectHubGc.cxx \ + DirectHubLegacy.cxx \ + FdUtils.cxx \ + FileUtils.cxx \ + ForwardAllocator.cxx \ + GcStreamParser.cxx \ + GcTcpHub.cxx \ + GridConnect.cxx \ + GridConnectHub.cxx \ + HubDevice.cxx \ + HubDeviceSelect.cxx \ + JSHubPort.cxx \ + Queue.cxx \ + ReflashBootloader.cxx \ + ServiceLocator.cxx \ + Stats.cxx \ + SocketCan.cxx \ + SocketClient.cxx \ + StringPrintf.cxx \ + constants.cxx \ + format_utils.cxx \ + gc_format.cxx \ + logging.cxx \ + socket_listener.cxx \ CXXTESTSRCS += BufferQueue.cxxtest \