Revert "improved rcl_wait in the area of timeout computation and spur… #1142

Merged 1 commit on Mar 30, 2024
26 changes: 1 addition & 25 deletions rcl/include/rcl/timer.h
@@ -319,7 +319,7 @@ rcl_timer_call_with_info(rcl_timer_t * timer, rcl_timer_call_info_t * call_info)
RCL_PUBLIC
RCL_WARN_UNUSED
rcl_ret_t
rcl_timer_clock(const rcl_timer_t * timer, rcl_clock_t ** clock);
rcl_timer_clock(rcl_timer_t * timer, rcl_clock_t ** clock);

/// Calculates whether or not the timer should be called.
/**
@@ -387,30 +387,6 @@ RCL_WARN_UNUSED
rcl_ret_t
rcl_timer_get_time_until_next_call(const rcl_timer_t * timer, int64_t * time_until_next_call);

/// Retrieve the time when the next call to rcl_timer_call() shall occur.
/**
* <hr>
* Attribute | Adherence
* ------------------ | -------------
* Allocates Memory | No
* Thread-Safe | Yes
* Uses Atomics | Yes
* Lock-Free | Yes [1]
* <i>[1] if `atomic_is_lock_free()` returns true for `atomic_int_least64_t`</i>
*
* \param[in] timer the handle to the timer that is being queried
* \param[out] next_call_time the output variable for the result
* \return #RCL_RET_OK if the timer until next call was successfully calculated, or
* \return #RCL_RET_INVALID_ARGUMENT if any arguments are invalid, or
* \return #RCL_RET_TIMER_INVALID if the timer->impl is invalid, or
* \return #RCL_RET_TIMER_CANCELED if the timer is canceled, or
* \return #RCL_RET_ERROR an unspecified error occur.
*/
RCL_PUBLIC
RCL_WARN_UNUSED
rcl_ret_t
rcl_timer_get_next_call_time(const rcl_timer_t * timer, int64_t * next_call_time);

/// Retrieve the time since the previous call to rcl_timer_call() occurred.
/**
* This function calculates the time since the last call and copies it into
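For context, this revert drops rcl_timer_get_next_call_time() and leaves rcl_timer_get_time_until_next_call() as the remaining way to ask when a timer is due. Below is a minimal sketch of calling the retained accessor, assuming an already-initialized timer handle; query_timer_timeout is a hypothetical helper name, not part of rcl.

#include <inttypes.h>
#include <stdio.h>
#include <rcl/timer.h>

// Hypothetical helper: report how long until `timer` is due, using the
// accessor that remains after this revert.
static rcl_ret_t query_timer_timeout(const rcl_timer_t * timer)
{
  int64_t time_until_next_call = 0;
  rcl_ret_t ret = rcl_timer_get_time_until_next_call(timer, &time_until_next_call);
  if (ret == RCL_RET_TIMER_CANCELED) {
    return ret;  // canceled timers contribute no wake-up time
  }
  if (ret != RCL_RET_OK) {
    return ret;  // the rcl error state is already set by the callee
  }
  // A negative value means the timer is already overdue.
  printf("timer due in %" PRId64 " ns\n", time_until_next_call);
  return RCL_RET_OK;
}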
18 changes: 1 addition & 17 deletions rcl/src/rcl/timer.c
@@ -254,7 +254,7 @@ rcl_timer_fini(rcl_timer_t * timer)
RCL_PUBLIC
RCL_WARN_UNUSED
rcl_ret_t
rcl_timer_clock(const rcl_timer_t * timer, rcl_clock_t ** clock)
rcl_timer_clock(rcl_timer_t * timer, rcl_clock_t ** clock)
{
RCL_CHECK_ARGUMENT_FOR_NULL(timer, RCL_RET_INVALID_ARGUMENT);
RCL_CHECK_ARGUMENT_FOR_NULL(clock, RCL_RET_INVALID_ARGUMENT);
@@ -343,22 +343,6 @@ rcl_timer_is_ready(const rcl_timer_t * timer, bool * is_ready)
return RCL_RET_OK;
}

rcl_ret_t
rcl_timer_get_next_call_time(const rcl_timer_t * timer, int64_t * next_call_time)
{
RCL_CHECK_ARGUMENT_FOR_NULL(timer, RCL_RET_INVALID_ARGUMENT);
RCL_CHECK_ARGUMENT_FOR_NULL(timer->impl, RCL_RET_TIMER_INVALID);
RCL_CHECK_ARGUMENT_FOR_NULL(next_call_time, RCL_RET_INVALID_ARGUMENT);

if (rcutils_atomic_load_bool(&timer->impl->canceled)) {
return RCL_RET_TIMER_CANCELED;
}

*next_call_time =
rcutils_atomic_load_int64_t(&timer->impl->next_call_time);
return RCL_RET_OK;
}

rcl_ret_t
rcl_timer_get_time_until_next_call(const rcl_timer_t * timer, int64_t * time_until_next_call)
{
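The signature revert above means callers once again need a mutable timer handle to look up its clock. A minimal sketch of the post-revert call, assuming an initialized timer; timer_uses_ros_time is a hypothetical helper, not part of rcl.

#include <stdbool.h>
#include <rcl/timer.h>

// Hypothetical helper illustrating the reverted signature: rcl_timer_clock()
// takes a non-const rcl_timer_t * again.
static rcl_ret_t timer_uses_ros_time(rcl_timer_t * timer, bool * uses_ros_time)
{
  rcl_clock_t * clock = NULL;
  rcl_ret_t ret = rcl_timer_clock(timer, &clock);
  if (ret != RCL_RET_OK) {
    return ret;  // e.g. RCL_RET_INVALID_ARGUMENT if timer or clock is NULL
  }
  *uses_ros_time = (clock->type == RCL_ROS_TIME);
  return RCL_RET_OK;
}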
103 changes: 25 additions & 78 deletions rcl/src/rcl/wait.c
@@ -541,100 +541,47 @@ rcl_wait(rcl_wait_set_t * wait_set, int64_t timeout)
// By default, set the timer to block indefinitely if none of the below conditions are met.
rmw_time_t * timeout_argument = NULL;
rmw_time_t temporary_timeout_storage;
bool is_timer_timeout = false;

for (uint64_t t_idx = 0; t_idx < wait_set->impl->timer_index; ++t_idx) {
if (!wait_set->timers[t_idx]) {
continue; // Skip NULL timers.
}
rmw_guard_conditions_t * rmw_gcs = &(wait_set->impl->rmw_guard_conditions);
size_t gc_idx = wait_set->size_of_guard_conditions + t_idx;
if (NULL != rmw_gcs->guard_conditions[gc_idx]) {
// This timer has a guard condition, so move it to make a legal wait set.
rmw_gcs->guard_conditions[rmw_gcs->guard_condition_count] =
rmw_gcs->guard_conditions[gc_idx];
++(rmw_gcs->guard_condition_count);
}
}

if (timeout == 0) {
// Then it is non-blocking, so set the temporary storage to 0, 0 and pass it.
temporary_timeout_storage.sec = 0;
temporary_timeout_storage.nsec = 0;
timeout_argument = &temporary_timeout_storage;
} else {
int64_t min_next_call_time[RCL_STEADY_TIME + 1];
rcl_clock_t * clocks[RCL_STEADY_TIME + 1] = {0, 0, 0, 0};

min_next_call_time[RCL_ROS_TIME] = INT64_MAX;
min_next_call_time[RCL_SYSTEM_TIME] = INT64_MAX;
min_next_call_time[RCL_STEADY_TIME] = INT64_MAX;

for (size_t t_idx = 0; t_idx < wait_set->impl->timer_index; ++t_idx) {
if (!wait_set->timers[t_idx]) {
bool is_timer_timeout = false;
int64_t min_timeout = timeout > 0 ? timeout : INT64_MAX;
{ // scope to prevent i from colliding below
uint64_t i = 0;
for (i = 0; i < wait_set->impl->timer_index; ++i) {
if (!wait_set->timers[i]) {
continue; // Skip NULL timers.
}

rcl_clock_t * clock;
if (rcl_timer_clock(wait_set->timers[t_idx], &clock) != RCL_RET_OK) {
// should never happen
return RCL_RET_ERROR;
rmw_guard_conditions_t * rmw_gcs = &(wait_set->impl->rmw_guard_conditions);
size_t gc_idx = wait_set->size_of_guard_conditions + i;
if (NULL != rmw_gcs->guard_conditions[gc_idx]) {
// This timer has a guard condition, so move it to make a legal wait set.
rmw_gcs->guard_conditions[rmw_gcs->guard_condition_count] =
rmw_gcs->guard_conditions[gc_idx];
++(rmw_gcs->guard_condition_count);
}

if (clock->type == RCL_ROS_TIME) {
bool timer_override_active = false;
if (rcl_is_enabled_ros_time_override(clock, &timer_override_active) != RCL_RET_OK) {
// should never happen
return RCL_RET_ERROR;
}

if (timer_override_active) {
// if the timer override is active, there is no point in computing a wait time,
// as it might be on a totally wrong time base. In case this timer becomes ready,
// the guard_condition above will wake us.
continue;
}
}

clocks[clock->type] = clock;

// get the time of the next call to the timer
int64_t next_call_time = INT64_MAX;
rcl_ret_t ret = rcl_timer_get_next_call_time(wait_set->timers[t_idx], &next_call_time);
// use timer time to set the rmw_wait timeout
// TODO(sloretz) fix spurious wake-ups on ROS_TIME timers with ROS_TIME enabled
int64_t timer_timeout = INT64_MAX;
rcl_ret_t ret = rcl_timer_get_time_until_next_call(wait_set->timers[i], &timer_timeout);
if (ret == RCL_RET_TIMER_CANCELED) {
wait_set->timers[t_idx] = NULL;
wait_set->timers[i] = NULL;
continue;
}
if (ret != RCL_RET_OK) {
return ret; // The rcl error state should already be set.
}
if (next_call_time < min_next_call_time[clock->type]) {
min_next_call_time[clock->type] = next_call_time;
}
}

int64_t min_timeout = timeout > 0 ? timeout : INT64_MAX;

// determine the min timeout of all clocks
for (size_t i = RCL_ROS_TIME; i <= RCL_STEADY_TIME; i++) {
if (clocks[i] == 0) {
continue;
}

int64_t cur_time;
rmw_ret_t ret = rcl_clock_get_now(clocks[i], &cur_time);
if (ret != RCL_RET_OK) {
return ret; // The rcl error state should already be set.
}

int64_t timer_timeout = min_next_call_time[i] - cur_time;

if (timer_timeout < min_timeout) {
is_timer_timeout = true;
min_timeout = timer_timeout;
}
}
}

if (timeout == 0) {
// Then it is non-blocking, so set the temporary storage to 0, 0 and pass it.
temporary_timeout_storage.sec = 0;
temporary_timeout_storage.nsec = 0;
timeout_argument = &temporary_timeout_storage;
} else if (timeout > 0 || is_timer_timeout) {
// If min_timeout was negative, we need to wake up immediately.
if (min_timeout < 0) {
min_timeout = 0;
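The hunk is cut off just before the step that converts the minimum timer timeout into the rmw_time_t handed to rmw_wait(). A simplified sketch of that conversion follows, with names mirroring the diff but written as an illustration rather than the exact upstream code.

#include <stdint.h>
#include <rmw/types.h>

// Simplified sketch: clamp an overdue (negative) timeout to zero, then split
// the remaining nanoseconds into the seconds/nanoseconds pair rmw_wait() expects.
static rmw_time_t timeout_ns_to_rmw_time(int64_t min_timeout)
{
  rmw_time_t t;
  if (min_timeout < 0) {
    min_timeout = 0;  // an overdue timer means the wait should return immediately
  }
  t.sec = (uint64_t)(min_timeout / 1000000000LL);
  t.nsec = (uint64_t)(min_timeout % 1000000000LL);
  return t;
}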
116 changes: 0 additions & 116 deletions rcl/test/rcl/test_wait.cpp
@@ -321,122 +321,6 @@ TEST_F(WaitSetTestFixture, zero_timeout_overrun_timer) {
EXPECT_LE(diff, RCL_MS_TO_NS(50));
}

// Test rcl_wait with a timeout value and an overrun timer
TEST_F(WaitSetTestFixture, no_wakeup_on_override_timer) {
rcl_wait_set_t wait_set = rcl_get_zero_initialized_wait_set();
rcl_ret_t ret =
rcl_wait_set_init(&wait_set, 0, 0, 1, 0, 0, 0, context_ptr, rcl_get_default_allocator());
EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
OSRF_TESTING_TOOLS_CPP_SCOPE_EXIT(
{
ret = rcl_wait_set_fini(&wait_set);
EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
});
rcl_clock_t clock;
rcl_allocator_t allocator = rcl_get_default_allocator();
ret = rcl_clock_init(RCL_ROS_TIME, &clock, &allocator);
OSRF_TESTING_TOOLS_CPP_SCOPE_EXIT(
{
ret = rcl_clock_fini(&clock);
EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
});
ASSERT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;

ret = rcl_enable_ros_time_override(&clock);
ASSERT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;

rcl_timer_t timer = rcl_get_zero_initialized_timer();
ret = rcl_timer_init2(
&timer, &clock, this->context_ptr, RCL_MS_TO_NS(10), nullptr, rcl_get_default_allocator(),
true);
EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
OSRF_TESTING_TOOLS_CPP_SCOPE_EXIT(
{
ret = rcl_timer_fini(&timer);
EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
});
ret = rcl_wait_set_add_timer(&wait_set, &timer, NULL);
EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;

// The wait should block for the full timeout, since the timer's ROS time never advances
std::chrono::steady_clock::time_point before_sc = std::chrono::steady_clock::now();
ret = rcl_wait(&wait_set, RCL_MS_TO_NS(100));
std::chrono::steady_clock::time_point after_sc = std::chrono::steady_clock::now();
// We expect a timeout here, since the ROS time override is active and ROS time never advances
ASSERT_EQ(RCL_RET_TIMEOUT, ret) << rcl_get_error_string().str;
int64_t diff = std::chrono::duration_cast<std::chrono::nanoseconds>(after_sc - before_sc).count();
EXPECT_GE(diff, RCL_MS_TO_NS(100));
}

// Test that rcl_wait wakes up when the ROS time override is advanced past the timer period
TEST_F(WaitSetTestFixture, wakeup_on_override_timer) {
rcl_wait_set_t wait_set = rcl_get_zero_initialized_wait_set();
rcl_ret_t ret =
rcl_wait_set_init(&wait_set, 0, 0, 1, 0, 0, 0, context_ptr, rcl_get_default_allocator());
EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
OSRF_TESTING_TOOLS_CPP_SCOPE_EXIT(
{
ret = rcl_wait_set_fini(&wait_set);
EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
});
rcl_clock_t clock;
rcl_allocator_t allocator = rcl_get_default_allocator();
ret = rcl_clock_init(RCL_ROS_TIME, &clock, &allocator);
OSRF_TESTING_TOOLS_CPP_SCOPE_EXIT(
{
ret = rcl_clock_fini(&clock);
EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
});
ASSERT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;

ret = rcl_enable_ros_time_override(&clock);
ASSERT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;

rcl_timer_t timer = rcl_get_zero_initialized_timer();
ret = rcl_timer_init2(
&timer, &clock, this->context_ptr, RCL_MS_TO_NS(10), nullptr, rcl_get_default_allocator(),
true);
EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
OSRF_TESTING_TOOLS_CPP_SCOPE_EXIT(
{
ret = rcl_timer_fini(&timer);
EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
});
ret = rcl_wait_set_add_timer(&wait_set, &timer, NULL);
EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;

// The wait should return before the full timeout once ROS time is advanced past the timer period
std::chrono::steady_clock::time_point before_sc = std::chrono::steady_clock::now();

rcl_ret_t wait_ret = RCL_RET_ERROR;
std::atomic_bool wait_done = false;
std::thread t([&wait_ret, &wait_set, &wait_done]() {
wait_ret = rcl_wait(&wait_set, RCL_MS_TO_NS(100));
wait_done = true;
});

ret = rcl_set_ros_time_override(&clock, RCL_MS_TO_NS(5));
EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;

std::this_thread::sleep_for(std::chrono::milliseconds(15));
EXPECT_EQ(wait_done, false) << "Error, wait terminated too early";

ret = rcl_set_ros_time_override(&clock, RCL_MS_TO_NS(15));
EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;

std::this_thread::sleep_for(std::chrono::milliseconds(5));

EXPECT_EQ(wait_done, true) << "Error, wait not woken up by time jump";

t.join();

std::chrono::steady_clock::time_point after_sc = std::chrono::steady_clock::now();
// We don't expect a timeout here (since the guard condition had already been triggered)
ASSERT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
int64_t diff = std::chrono::duration_cast<std::chrono::nanoseconds>(after_sc - before_sc).count();
EXPECT_LT(diff, RCL_MS_TO_NS(100));
}

// Check that a canceled timer doesn't wake up rcl_wait
TEST_F(WaitSetTestFixture, canceled_timer) {
rcl_wait_set_t wait_set = rcl_get_zero_initialized_wait_set();