Merge pull request #64 from ltindall/delayed_data_handling
Now gracefully updates tracks even when MQTT messages are skipped.
cglewis authored Jun 13, 2022
2 parents b5322f9 + 9c9fb0e commit 08ee14c
Showing 3 changed files with 104 additions and 29 deletions.
16 changes: 9 additions & 7 deletions birdseye/env.py
@@ -22,7 +22,7 @@ def __init__(self, sensor=None, actions=None, state=None, simulated=True):
self.last_observation = None
self.pf = None

def dynamics(self, particles, control=None, **kwargs):
def dynamics(self, particles, control=None, distance=None, course=None, heading=None, **kwargs):
"""Helper function for particle filter dynamics
Returns
@@ -34,7 +34,7 @@ def dynamics(self, particles, control=None, **kwargs):
for p in particles:
new_p = []
for t in range(self.state.n_targets):
new_p += self.state.update_state(p[4*t:4*(t+1)], control)
new_p += self.state.update_state(p[4*t:4*(t+1)], control=control, distance=distance, course=course, heading=heading)
#new_p = np.array([self.state.update_state(target_state, control) for target_state in p])
updated_particles.append(new_p)
return np.array(updated_particles)
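
The loop above slices each flattened particle into 4-entry target states (p[4*t:4*(t+1)]) and now forwards the real-motion keyword arguments straight through to update_state. A standalone sketch of that slicing pattern, with a stub update that just passes state through (the stub is illustrative, not the birdseye implementation):

import numpy as np

def dynamics_sketch(particles, n_targets, update_state, **kwargs):
    # Apply update_state to each 4-element target slice of every particle.
    updated = []
    for p in particles:
        new_p = []
        for t in range(n_targets):
            # each target occupies 4 consecutive entries: [r, theta, crs, spd]
            new_p += list(update_state(p[4*t:4*(t+1)], **kwargs))
        updated.append(new_p)
    return np.array(updated)

identity = lambda s, **kw: s                      # placeholder update
particles = np.random.rand(5, 8)                  # 5 particles, 2 targets x 4 state vars
print(dynamics_sketch(particles, 2, identity, distance=1.0, course=90, heading=0).shape)  # (5, 8)
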
@@ -92,22 +92,24 @@ def reset(self, num_particles=2000):
return env_obs

# returns observation, reward, done, info
def real_step(self, action, bearing):
#action = self.actions.index_to_action(action_idx)
def real_step(self, data):
#action = data['action_taken'] if data.get('action_taken', None) else (0,0)

# Update position of sensor
self.state.update_sensor(action, bearing=bearing)
self.state.update_real_sensor(data.get('distance', None), data.get('course', None), data.get('bearing', None))

# Get sensor observation
observation = self.sensor.real_observation()
observation = np.array(observation) if observation is not None else None

# Update particle filter
self.pf.update(observation, xp=self.pf.particles, control=action)
self.pf.update(observation, xp=self.pf.particles, distance=data.get('distance', None), course=data.get('course', None), heading=data.get('bearing', None))
#particle_swap(self)

# Calculate reward based on updated state & action
reward = self.state.reward_func(state=None, action=action, particles=self.pf.particles)
control_heading = data['bearing'] if data.get('bearing', None) else self.state.sensor_state[2]
control_delta_heading = (control_heading - self.state.sensor_state[2]) % 360
reward = self.state.reward_func(state=None, action=(control_delta_heading,data.get('distance',0)), particles=self.pf.particles)

belief_obs = self.env_observation()

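With the dictionary interface, real_step derives the control tuple for the reward from the raw data instead of receiving a precomputed action. A small standalone sketch of that derivation; the dictionary keys mirror the ones real_step reads, while the values and the 45-degree sensor heading are invented for illustration:

data = {
    'rssi': -60,
    'position': [32.7, -117.1],
    'bearing': 120.0,    # degrees
    'course': 118.0,     # degrees
    'distance': 3.5,     # distance travelled since the last step
}

sensor_heading = 45.0    # stands in for state.sensor_state[2]

control_heading = data['bearing'] if data.get('bearing', None) else sensor_heading
control_delta_heading = (control_heading - sensor_heading) % 360
control = (control_delta_heading, data.get('distance', 0))
print(control)           # (75.0, 3.5)
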
101 changes: 84 additions & 17 deletions birdseye/state.py
@@ -1,7 +1,7 @@
import random
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from .utils import pol2cart
from .utils import pol2cart, cart2pol


class State:
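
The real-state update needs both directions of the polar/Cartesian conversion, hence the added cart2pol import. Both helpers live in birdseye/utils.py; a likely minimal form, assumed here rather than copied from the repository, is:

import numpy as np

def pol2cart(rho, phi):
    # polar (radius, angle in radians) -> Cartesian (x, y)
    return rho * np.cos(phi), rho * np.sin(phi)

def cart2pol(x, y):
    # Cartesian (x, y) -> polar (radius, angle in radians)
    return np.sqrt(x**2 + y**2), np.arctan2(y, x)

print(cart2pol(*pol2cart(5.0, np.radians(30))))   # ~(5.0, 0.5236), i.e. 30 degrees back in radians
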
@@ -49,7 +49,10 @@ def __init__(self, n_targets=1, prob=0.9, target_speed=None, target_speed_range=
# Setup an initial random state
self.target_state = None
if simulated:
self.update_state = self.update_sim_state
self.target_state = self.init_target_state()
else:
self.update_state = self.update_real_state
# Setup an initial sensor state
self.sensor_state = self.init_sensor_state()

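Binding update_state to either the simulated or the real update in the constructor keeps a single call site for the particle filter. A minimal illustration of the same dispatch pattern; the class and method names below are placeholders, not the birdseye API:

class Tracker:
    def __init__(self, simulated=True):
        # choose the update implementation once, at construction time
        self.update_state = self._update_sim if simulated else self._update_real

    def _update_sim(self, state, **kwargs):
        return state   # placeholder

    def _update_real(self, state, **kwargs):
        return state   # placeholder

print(Tracker(simulated=False).update_state([10.0, 0.0, 90.0, 1.0]))
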
@@ -241,7 +244,7 @@ def entropy_collision_reward(self, state, action=None, action_idx=None, particle


# returns new state given last state and action (control)
def update_state(self, state, control, target_update=False, transition_overwrite=None):
def update_sim_state(self, state, control=None, transition_overwrite=None, **kwargs):
"""Update state based on state and action
Parameters
@@ -258,6 +261,9 @@ def update_state(self, state, control, target_update=False, transition_overwrite
"""
# Get current state vars
r, theta, crs, spd = state

spd = random.randint(0,1)

control_spd = control[1]

theta = theta % 360
@@ -276,24 +282,12 @@ def update_sim_state(self, state, control=None, transition_overwrite=None, **kwargs):
x, y = pol2cart(r, np.radians(theta))

# Generate next course given current course
if target_update:
spd = random.choice(self.target_speed_range)
if self.target_movement == 'circular':
d_crs, circ_spd = self.circular_control(50)
crs += d_crs
spd = circ_spd
else:
if random.random() >= self.prob_target_change_crs:
crs += random.choice([-1, 1]) * 30
else:
if random.random() >= self.prob_target_change_crs:
crs += random.choice([-1, 1]) * 30
if random.random() >= self.prob_target_change_crs:
crs += random.choice([-1, 1]) * 30
crs %= 360
if crs < 0:
crs += 360

spd = random.randint(0,1)

# Transform changes to coords to cartesian
dx, dy = pol2cart(spd, np.radians(crs))
if transition_overwrite:
@@ -308,8 +302,81 @@

return [r, theta, crs, spd]

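As written, the simulated update now re-rolls the target speed every step (random.randint(0, 1)) and turns the course by +/-30 degrees whenever random.random() >= prob_target_change_crs. A tiny standalone sketch of that course/speed random walk; the threshold value is illustrative:

import random

def random_walk(crs, threshold=0.9, turn=30):
    # turn +/-30 degrees when the draw exceeds the threshold, wrap to [0, 360)
    if random.random() >= threshold:
        crs += random.choice([-1, 1]) * turn
    return crs % 360, random.randint(0, 1)    # (new course, new speed)

print(random_walk(350))
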
# returns new state given last state and real sensor motion (distance, course, heading)
def update_real_state(self, state, distance=None, course=None, heading=None, **kwargs):
"""Update state based on state and action
Parameters
----------
state_vars : list
List of current state variables
control : action (tuple)
Action tuple
Returns
-------
State (array_like)
Updated state values array
"""
if distance is None:
distance = 0
if course is None:
course = 0
if heading is None:
heading = self.sensor_state[2]

# Get current state vars
r, theta_deg, crs, spd = state
if random.random() >= self.prob_target_change_crs:
crs += random.choice([-1, 1]) * 30
spd = random.randint(0,1)
control_spd = distance
control_course = course % 360
control_delta_heading = (heading - self.sensor_state[2]) % 360

# polar -> cartesian
x, y = pol2cart(r, np.radians(theta_deg))

# translate sensor movement
dx, dy = pol2cart(control_spd, np.radians(control_course))
pos = [x - dx, y - dy]

# translate target movement
dx, dy = pol2cart(spd, np.radians(crs))
pos = [pos[0] + dx, pos[1] + dy]

# cartesian -> polar
r, theta = cart2pol(pos[0], pos[1])
theta_deg = np.degrees(theta)

# rotation
theta_deg -= control_delta_heading
theta_deg %= 360
crs -= control_delta_heading
crs %= 360

return [r, theta_deg, crs, spd]

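update_real_state keeps each target in sensor-relative polar coordinates: subtract the sensor's own displacement, add the target's displacement (zero here for a stationary target), then rotate the frame by the change in sensor heading. A standalone numeric walk-through with invented values (a stationary target 10 m dead ahead, sensor moves 2 m forward and turns 90 degrees):

import numpy as np

r, theta_deg = 10.0, 0.0                                  # target: 10 m at 0 deg relative bearing
x, y = r * np.cos(np.radians(theta_deg)), r * np.sin(np.radians(theta_deg))

dx, dy = 2.0 * np.cos(np.radians(0.0)), 2.0 * np.sin(np.radians(0.0))
x, y = x - dx, y - dy                                     # sensor moved 2 m along relative course 0 deg

r = np.hypot(x, y)
theta_deg = (np.degrees(np.arctan2(y, x)) - 90.0) % 360   # sensor turned 90 deg -> rotate the frame
print(r, theta_deg)                                       # 8.0 270.0
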
def update_real_sensor(self, distance, course, heading):

r, theta_deg, prev_heading, spd = self.sensor_state
heading = heading if heading else prev_heading

if distance and course:
spd = distance
crs = course % 360
dx, dy = pol2cart(spd, np.radians(crs))
x, y = pol2cart(r, np.radians(theta_deg))
pos = [x + dx, y + dy]

r = np.sqrt(pos[0]**2 + pos[1]**2)
theta_deg = np.degrees(np.arctan2(pos[1], pos[0]))
theta_deg %= 360

self.sensor_state = np.array([r, theta_deg, heading, spd])

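update_real_sensor is where skipped or partial MQTT messages are absorbed: a missing heading keeps the previous one, and the position is only dead-reckoned forward when both distance and course are present. A stripped-down standalone sketch of that guard logic (a free function rather than the class method):

import numpy as np

def dead_reckon(sensor_state, distance=None, course=None, heading=None):
    # sensor_state is [r, theta_deg, heading, spd] in polar coordinates
    r, theta_deg, prev_heading, spd = sensor_state
    heading = heading if heading else prev_heading     # keep the old heading on a miss
    if distance and course:                            # only move when both are reported
        x, y = r * np.cos(np.radians(theta_deg)), r * np.sin(np.radians(theta_deg))
        x, y = x + distance * np.cos(np.radians(course)), y + distance * np.sin(np.radians(course))
        r, theta_deg, spd = np.hypot(x, y), np.degrees(np.arctan2(y, x)) % 360, distance
    return np.array([r, theta_deg, heading, spd])

print(dead_reckon(np.array([0.0, 0.0, 90.0, 0.0]), distance=5.0, course=45.0))   # approx [5. 45. 90. 5.]
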
def update_sensor(self, control, bearing=None):
r, theta_deg, crs, spd = self.sensor_state
r, theta_deg, crs, old_spd = self.sensor_state

spd = control[1]

16 changes: 11 additions & 5 deletions sigscan.py
@@ -32,6 +32,7 @@
'previous_position': None,
'bearing': None,
'previous_bearing': None,
'course': None,
'action_proposal': None,
'action_taken': None,
'reward': None,
@@ -40,19 +41,22 @@
# Generic data processor
def data_handler(message_data):
global data
data['previous_position'] = data.get('position', None)
data['previous_bearing'] = data.get('bearing', None)
data['previous_position'] = data.get('position', None) if data.get('set_previous', False) else data.get('previous_position', None)
data['previous_bearing'] = data.get('bearing', None) if data.get('set_previous', False) else data.get('previous_bearing', None)

data['rssi'] = message_data.get('rssi', None)
data['position'] = message_data.get('position', None)
data['bearing'] = -float(message_data.get('bearing', None))+90 if is_float(message_data.get('bearing', None)) else get_bearing(data['previous_position'], data['position'])
data['course'] = get_bearing(data['previous_position'], data['position'])
data['bearing'] = -float(message_data.get('bearing', None))+90 if is_float(message_data.get('bearing', None)) else data['course']
data['distance'] = get_distance(data['previous_position'], data['position'])
delta_bearing = (data['bearing'] - data['previous_bearing']) if data['bearing'] and data['previous_bearing'] else None
data['action_taken'] = (delta_bearing, data['distance']) if delta_bearing and data['distance'] else (0,0)

data['drone_position'] = message_data.get('drone_position', None)
if data['drone_position']:
data['drone_position'] = [data['drone_position'][1], data['drone_position'][0]] # swap lon,lat
data['drone_position'] = [data['drone_position'][1], data['drone_position'][0]] # swap lon,lat

data['set_previous'] = False

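The handler now tracks course (derived from successive positions with get_bearing) separately from bearing (reported by the sensor and apparently converted from compass degrees to the math convention with -bearing + 90, falling back to the course when nothing is reported). A small standalone illustration of that conversion and fallback; the get_bearing result is replaced by a fixed value here:

def to_math_degrees(compass_bearing):
    # compass (0 deg = north, clockwise) -> math convention (0 deg = east, counter-clockwise)
    return -float(compass_bearing) + 90

course_from_positions = 30.0          # stand-in for get_bearing(previous_position, position)

reported = 45.0                       # sensor-reported compass bearing
bearing = to_math_degrees(reported) if reported is not None else course_from_positions
print(bearing)                        # 45.0

reported = None                       # no bearing in this MQTT message
bearing = to_math_degrees(reported) if reported is not None else course_from_positions
print(bearing)                        # 30.0
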
# MQTT
def on_message(client, userdata, json_message):
@@ -204,6 +208,7 @@ def main(config=None, debug=False):
# Flask
fig = plt.figure(figsize=(18,10), dpi=50)
ax = fig.subplots()
fig.set_tight_layout(True)
time_step = 0
if config.get('flask', 'false').lower() == 'true':
run_flask(flask_host, flask_port, fig, results, debug)
@@ -226,7 +231,8 @@

step_start = timer()
# update belief based on action and sensor observation (sensor is read inside)
belief, reward, observation = env.real_step(data['action_taken'] if data.get('action_taken', None) else (0,0), data.get('bearing', None))
belief, reward, observation = env.real_step(data)
data['set_previous'] = True
step_end = timer()

plot_start = timer()
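Taken together, data_handler and the main loop form a simple handshake: set_previous only becomes True after real_step has consumed the data, so when several MQTT messages arrive between steps the previous position/bearing stay pinned and the next distance/course span the whole gap, which is what lets tracks update gracefully despite skipped messages. A condensed sketch of that flow, with hypothetical stubs standing in for the handler and the step:

data = {'position': None, 'previous_position': None, 'set_previous': False}

def handle_message(new_position):
    # roll the previous position forward only if the last value was consumed by a step
    if data['set_previous']:
        data['previous_position'] = data['position']
    data['position'] = new_position
    data['set_previous'] = False

def step():
    print('step over', data['previous_position'], '->', data['position'])
    data['set_previous'] = True    # allow the next message to roll the previous value forward

handle_message((0, 0))
handle_message((1, 1))    # arrives before the loop steps; previous_position stays pinned
step()                    # step over None -> (1, 1)
handle_message((2, 2))
step()                    # step over (1, 1) -> (2, 2)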
