Merge pull request #98 from rte-france/alarm_feature
Alarm feature
marota authored Dec 21, 2021
2 parents 5d33d32 + e9bb034 commit 847a50f
Showing 120 changed files with 8,213 additions and 221 deletions.
45 changes: 45 additions & 0 deletions CustomAgent.py
100644 → 100755
@@ -241,3 +241,48 @@ def merge_combined_actions_subs(self, sub_actions_dict, subs_keys):
acts.append(act)

return acts


class DoNothing_Attention_Agent(BaseAgent):
"""
This is the most basic BaseAgent. It is purely passive, and does absolutely nothing.
As opposed to most reinforcement learning environments, in grid2op, doing nothing is often
the best solution.
"""

def __init__(self, env):
BaseAgent.__init__(self, env.action_space)
self.alarms_lines_area = env.alarms_lines_area
self.alarms_area_names = env.alarms_area_names

def act(self, observation, reward, done=False):
"""
As better explained in the document of :func:`grid2op.BaseAction.update` or
:func:`grid2op.BaseAction.ActionSpace.__call__`.
The preferred way to make an object of type action is to call :func:`grid2op.BaseAction.ActionSpace.__call__`
with the dictionary representing the action. In this case, the action is "do nothing" and it is represented by
the empty dictionary.
Parameters
----------
observation: :class:`grid2op.Observation.Observation`
The current observation of the :class:`grid2op.Environment.Environment`
reward: ``float``
The current reward. This is the reward obtained by the previous action
done: ``bool``
Whether the episode has ended or not. Used to maintain gym compatibility
Returns
-------
res: :class:`grid2op.Action.Action`
The action chosen by the bot / controller / agent.
"""
        res = self.action_space({})

        # Raise an alarm on the first alarm zone at every step, if any zone is defined.
        zones_alert = []
        if len(self.alarms_area_names) >= 1:
            zones_alert = [0]
            res = self.action_space({"raise_alarm": zones_alert})
        # print(res)
        return res
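A minimal usage sketch for this agent, assuming an alarm-enabled grid2op environment; the dataset name below is a placeholder, and the gym-style loop is illustrative rather than part of this change:

import grid2op
from CustomAgent import DoNothing_Attention_Agent

# "l2rpn_icaps_2021_small" is a placeholder: any environment exposing alarm zones
# (non-empty env.alarms_area_names) should work here.
env = grid2op.make("l2rpn_icaps_2021_small")
agent = DoNothing_Attention_Agent(env)

obs = env.reset()
reward, done = 0.0, False
while not done:
    action = agent.act(obs, reward, done)       # "do nothing" on the grid + raise_alarm on zone 0
    obs, reward, done, info = env.step(action)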



133 changes: 80 additions & 53 deletions generate_sample.py
100644 → 100755
@@ -4,13 +4,14 @@

import os
import grid2op
from grid2op.operator_attention import LinearAttentionBudget
from grid2op.Agent import TopologyGreedy, DoNothingAgent
from grid2op.Runner import Runner
from grid2op import make
from CustomAgent import RandomRedispatchAgent, MultipleTopologyAgent
from grid2op import make, make_from_dataset_path
from CustomAgent import RandomRedispatchAgent, MultipleTopologyAgent, DoNothing_Attention_Agent
from grid2op.Parameters import Parameters

dataset = "rte_case14_realistic"
dataset_path = "tests/data/rte_case14_realistic"#"rte_case14_realistic"

# use lightsim2grid to go a lot faster if available
try:
@@ -35,62 +36,88 @@
print("MultiTopology")
path_save = "grid2viz/data/agents/multiTopology-baseline"
os.makedirs(path_save, exist_ok=True)
with make(dataset, param=params, backend=backend) as env:
agent = MultipleTopologyAgent(env.action_space, env.observation_space)
runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent)
# need to be seeded for reproducibility as this takes random redispatching actions
runner.run(
nb_episode=1,
path_save="grid2viz/data/agents/multiTopology-baseline",
nb_process=1,
max_iter=30,
env_seeds=[0],
agent_seeds=[0],
pbar=True,
)
env.close()

env = make_from_dataset_path(dataset_path, param=params, backend=backend, has_attention_budget=True,
                             attention_budget_class=LinearAttentionBudget,
                             kwargs_attention_budget={"max_budget": 3.,
                                                      "budget_per_ts": 1. / (12. * 16),
                                                      "alarm_cost": 1.,
                                                      "init_budget": 2.})
#with make(dataset, param=params, backend=backend) as env:
agent = MultipleTopologyAgent(env.action_space, env.observation_space)
runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent)
# need to be seeded for reproducibility as this takes random redispatching actions
runner.run(
nb_episode=1,
path_save="grid2viz/data/agents/multiTopology-baseline",
nb_process=1,
max_iter=30,
env_seeds=[0],
agent_seeds=[0],
pbar=True,
)
# env.close()  # problem closing for now: it reports the environment as already closed
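The kwargs above suggest a budget that refills by budget_per_ts each step (capped at max_budget) and is debited by alarm_cost whenever an alarm is raised. A rough sketch of that bookkeeping, as an assumption about LinearAttentionBudget rather than its actual implementation:

# Illustration of the budget dynamics implied by kwargs_attention_budget above.
# Assumption: this mirrors, but is not, the real LinearAttentionBudget code.
MAX_BUDGET = 3.0
BUDGET_PER_TS = 1.0 / (12.0 * 16)   # with 5-minute steps, roughly 16 hours to regain one unit
ALARM_COST = 1.0
INIT_BUDGET = 2.0

def next_budget(budget, alarm_raised):
    budget = min(MAX_BUDGET, budget + BUDGET_PER_TS)
    if alarm_raised and budget >= ALARM_COST:
        budget -= ALARM_COST            # an alarm only goes through if enough budget remains
    return budget

budget = INIT_BUDGET
for step in range(5):
    budget = next_budget(budget, alarm_raised=(step == 0))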


print("redispatching")

with make(dataset, param=params, backend=backend) as env:
agent = RandomRedispatchAgent(env.action_space, env)
runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent)
# need to be seeded for reproducibility as this takes random redispatching actions
runner.run(
nb_episode=1,
path_save="grid2viz/data/agents/redispatching-baseline",
nb_process=1,
max_iter=100,
env_seeds=[0],
agent_seeds=[0],
pbar=True,
)
env.close()
#with make(dataset, param=params, backend=backend2) as env2:
agent = RandomRedispatchAgent(env.action_space, env)
runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent)
# need to be seeded for reproducibility as this takes random redispatching actions
runner.run(
nb_episode=1,
path_save="grid2viz/data/agents/redispatching-baseline",
nb_process=1,
max_iter=100,
env_seeds=[0],
agent_seeds=[0],
pbar=True,
)
#env2.close()

print("do-nothing")
with make(dataset, param=params, backend=backend) as env:
agent = DoNothingAgent(env.action_space)
runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent)
runner.run(
nb_episode=2,
path_save="grid2viz/data/agents/do-nothing-baseline",
nb_process=1,
max_iter=2000,
pbar=True,
)
env.close()
#with make(dataset, param=params, backend=backend) as env:
agent = DoNothingAgent(env.action_space)
runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent)
runner.run(
nb_episode=2,
path_save="grid2viz/data/agents/do-nothing-baseline",
nb_process=1,
max_iter=2000,
pbar=True,
)
#env.close()


print("greedy")
with make(dataset, param=params, backend=backend) as env:
agent = TopologyGreedy(env.action_space)
runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent)
runner.run(
nb_episode=2,
path_save="grid2viz/data/agents/greedy-baseline",
nb_process=1,
max_iter=2000,
pbar=True,
)
env.close()
#with make(dataset, param=params, backend=backend) as env:
agent = TopologyGreedy(env.action_space)
runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent)
runner.run(
nb_episode=2,
path_save="grid2viz/data/agents/greedy-baseline",
nb_process=1,
max_iter=2000,
pbar=True,
)

print("alarm-agent")
agent = DoNothing_Attention_Agent(env)
runner = Runner(
**env.get_params_for_runner(), agentClass=None, agentInstance=agent
)
# seeded for reproducibility (this agent is deterministic, but seeding keeps runs comparable)
runner.run(
nb_episode=1,
path_save="grid2viz/data/agents/alarm-baseline",
nb_process=1,
max_iter=10,
env_seeds=[0],
agent_seeds=[0],
pbar=True,
)

env.close()
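To sanity-check the saved alarm episode, something along these lines should work; the episode name "000" is a placeholder for whichever chronic the Runner actually recorded, and the raise_alarm action attribute is assumed to be available in this grid2op version:

from grid2op.Episode import EpisodeData

# Placeholder episode name: use the chronic name written by the Runner.
episode = EpisodeData.from_disk("grid2viz/data/agents/alarm-baseline", "000")
n_alarm_steps = sum(1 for act in episode.actions if act.raise_alarm.any())
print("steps with an alarm raised:", n_alarm_steps)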


Empty file modified grid2viz/src/episodes/episodes_clbk.py
100644 → 100755
Empty file.
80 changes: 57 additions & 23 deletions grid2viz/src/episodes/episodes_lyt.py
@@ -11,68 +11,102 @@
import plotly.figure_factory as ff
import seaborn as sn

from grid2viz.src.manager import survival_df, grid2viz_home_directory, scenarios
from grid2viz.src.manager import survival_df, attention_df, grid2viz_home_directory, scenarios
from grid2viz.src.utils.constants import DONT_SHOW_FILENAME
from grid2viz.src.utils.layout_helpers import modal, should_help_open


# This is to improve readability of the heatmap of survival steps for agents
def get_heatmap_survival_score(df):
if (df.shape[0] >= 2) and (df.shape[1] >= 2):
clustered_df = sn.clustermap(df)
def get_heatmap_survival_attention_score(df_survival, df_attention):
if (df_survival.shape[0] >= 2) and (df_survival.shape[1] >= 2):
clustered_df = sn.clustermap(df_survival)
reordered_scenarios = clustered_df.dendrogram_row.reordered_ind
reordered_agents = clustered_df.dendrogram_col.reordered_ind
return df.iloc[reordered_scenarios, reordered_agents]
return df_survival.iloc[reordered_scenarios, reordered_agents], df_attention.iloc[reordered_scenarios, reordered_agents]
else:
return df
return df_survival, df_attention
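A toy illustration of the co-reordering performed above, under the assumption that both dataframes share the same scenario index and agent columns:

# Cluster on survival scores, then apply the same ordering to the attention scores.
import pandas as pd
import seaborn as sn

survival = pd.DataFrame([[90, 10, 55], [15, 85, 60], [80, 20, 50]],
                        index=["sc0", "sc1", "sc2"], columns=["agentA", "agentB", "agentC"])
attention = pd.DataFrame([[0.2, 0.8, 0.5], [0.7, 0.1, 0.4], [0.3, 0.9, 0.6]],
                         index=survival.index, columns=survival.columns)

g = sn.clustermap(survival)
rows = g.dendrogram_row.reordered_ind
cols = g.dendrogram_col.reordered_ind
survival_sorted = survival.iloc[rows, cols]
attention_sorted = attention.iloc[rows, cols]   # same row/column order as survival_sorted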


def create_heatmap_figure(df):
clustered_survival_df = get_heatmap_survival_score(df)
def create_heatmap_figures(df_survival, df_attention):
clustered_survival_df, clustered_attention_df = get_heatmap_survival_attention_score(df_survival, df_attention)

z_text = clustered_survival_df.copy().astype(str)
z_text[z_text == "-1"] = ""

heatmap_figure = ff.create_annotated_heatmap( # go.Figure(data=go.Heatmap(
heatmap_figure_survival = ff.create_annotated_heatmap( # go.Figure(data=go.Heatmap(
z=clustered_survival_df.values, # survival_df.values,#z=pd.concat([survival_df, survival_df]))),
x=clustered_survival_df.columns.tolist(),
y=clustered_survival_df.index.tolist(),
colorscale="RdYlGn",
zmid=50,
annotation_text=z_text.values,
)
heatmap_figure.update_layout(
heatmap_figure_survival.update_layout(
{"yaxis": {"type": "category"}, "xaxis": {"type": "category"}}
)
return heatmap_figure

z_text = clustered_attention_df.copy().astype(str)
#z_text[z_text == "-1"] = ""
heatmap_figure_attention = ff.create_annotated_heatmap( # go.Figure(data=go.Heatmap(
z=clustered_attention_df.values, # survival_df.values,#z=pd.concat([survival_df, survival_df]))),
x=clustered_attention_df.columns.tolist(),
y=clustered_attention_df.index.tolist(),
colorscale="RdYlGn",
zmid=0.5,
annotation_text=z_text.values,
)
heatmap_figure_attention.update_layout(
{"yaxis": {"type": "category"}, "xaxis": {"type": "category"}}
)

def generate_heatmap_components(df):
heatmap_div = html.Div(
return heatmap_figure_survival, heatmap_figure_attention



def generate_heatmap_components(df_survival, df_attention):

    heatmap_survival, heatmap_attention = create_heatmap_figures(df_survival, df_attention)
heatmap_survival_div = html.Div(
children=[
html.H5("Percentage of Agents' survival time over a scenario"),

html.H3("Percentage of Agents' survival time over a scenario"),
dcc.Graph(
id="heatmap",
figure=create_heatmap_figure(df),
id="heatmap survival",
figure=heatmap_survival,
),
],
className="col-xl-12 align-self-center heatmap",
className="four columns"#"col-xl-12 align-self-center heatmap",
)

return html.Div(
className="lineBlockSlim card",
heatmap_attention_div = html.Div(
children=[
dbc.Collapse(heatmap_div, id="collapse"),
scenarios_filter(sorted(list(scenarios))),
html.H3("Agent attention score over a scenario"),
dcc.Graph(
id="heatmap attention",
figure=heatmap_attention,
),
],
className="four columns"#"col-xl-12 align-self-center heatmap",
)

    heatmaps_div = html.Div(className="row", children=[heatmap_survival_div, heatmap_attention_div])

return html.Div(
className="lineBlockSlim card",
children=[html.Div(className="row",
children=[dbc.Collapse(heatmaps_div, id="collapse"),
scenarios_filter(sorted(list(scenarios)))
])
],
)


def scenarios_filter(scenarios):
return html.Div(
id="scenario_filter_div",
className="four columns",
children=[
html.H5("Select the scenarios you want to see below."),
html.H5("Select the scenarios you want to see in cards below."),
dac.Select(
id="scenarios_filter",
options=[
@@ -115,7 +149,7 @@ def layout():
children=[
dcc.Store(id="relayoutStoreScenario"),
comparison_button(),
generate_heatmap_components(survival_df),
generate_heatmap_components(survival_df, attention_df),
dbc.Row(id="cards_container", className="m-1"),
modal(id_suffix="episodes", is_open=open_help, header=header, body=body),
],
