Skip to content

Commit

Permalink
Refactor and Improvements to #124 (#127)
Browse files Browse the repository at this point in the history
* Removed artifacts

* Removed artifacts
  • Loading branch information
TeachMeTW authored Sep 22, 2024
1 parent 072a363 commit b6e20ae
Show file tree
Hide file tree
Showing 2 changed files with 4 additions and 18 deletions.
9 changes: 2 additions & 7 deletions pages/data.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@
dcc.Tab(label='Trajectories', value='tab-trajectories-datatable'),
]),
html.Div(id='tabs-content'),
dcc.Interval(id='interval-load-more', interval=6000, n_intervals=0),
dcc.Interval(id='interval-load-more', interval=10000, n_intervals=0), # default loading at 10s, can be lowered or heightened based on perf (usual process local is 3s)
dcc.Store(id='store-uuids', data=[]), # Store to hold the original UUIDs data
dcc.Store(id='store-loaded-uuids', data={'data': [], 'loaded': False}) # Store to track loaded data
]
Expand Down Expand Up @@ -106,10 +106,6 @@ def render_content(tab, store_uuids, store_excluded_uuids, store_trips, store_de
processed_data = db_utils.add_user_stats(new_data, initial_batch_size)
loaded_data.extend(processed_data)

# Create a Patch object to append data progressively
patched_data = Patch()
patched_data['data'] = processed_data

# Update the store with the new data
loaded_uuids_store['data'] = loaded_data
loaded_uuids_store['loaded'] = len(loaded_data) >= len(uuids_list) # Mark all data as loaded if done
Expand All @@ -126,8 +122,7 @@ def render_content(tab, store_uuids, store_excluded_uuids, store_trips, store_de

df = df.drop(columns=[col for col in df.columns if col not in columns])

# Use the Patch() object to append new data instead of fully replacing the table
logging.debug("Returning patched data to update the UI.")
logging.debug("Returning appended data to update the UI.")
return html.Div([populate_datatable(df)]), loaded_uuids_store, False if not loaded_uuids_store['loaded'] else True


Expand Down
13 changes: 2 additions & 11 deletions utils/db_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -235,7 +235,6 @@ def query_demographics():
return dataframes

def query_trajectories(start_date: str, end_date: str, tz: str):

(start_ts, end_ts) = iso_range_to_ts_range(start_date, end_date, tz)
ts = esta.TimeSeries.get_aggregate_time_series()
entries = ts.find_entries(
Expand All @@ -255,14 +254,6 @@ def query_trajectories(start_date: str, end_date: str, tz: str):
df['data.mode_str'] = df['data.mode'].apply(lambda x: ecwm.MotionTypes(x).name if x in set(enum.value for enum in ecwm.MotionTypes) else 'UNKNOWN')
return df

@lru_cache(maxsize=None)
def get_time_series_aggregate():
return esta.TimeSeries.get_aggregate_time_series()

@lru_cache(maxsize=None)
def get_user_profile(user_uuid):
return edb.get_profile_db().find_one({'user_id': user_uuid})

def add_user_stats(user_data, batch_size=5):
start_time = time.time()
time_format = 'YYYY-MM-DD HH:mm:ss'
Expand All @@ -271,10 +262,10 @@ def process_user(user):
user_uuid = UUID(user['user_id'])

# Fetch aggregated data for all users once and cache it
ts_aggregate = get_time_series_aggregate()
ts_aggregate = esta.TimeSeries.get_aggregate_time_series()

# Fetch data for the user, cached for repeated queries
profile_data = get_user_profile(user_uuid)
profile_data = edb.get_profile_db().find_one({'user_id': user_uuid})

total_trips = ts_aggregate.find_entries_count(
key_list=["analysis/confirmed_trip"],
Expand Down

0 comments on commit b6e20ae

Please sign in to comment.