addressing issue #76, addressing bug in data generation for the forecasts
BDonnot committed Jan 23, 2024
1 parent 9de298e commit a90403d
Showing 8 changed files with 179 additions and 35 deletions.
2 changes: 1 addition & 1 deletion chronix2grid/__init__.py
@@ -6,4 +6,4 @@
# SPDX-License-Identifier: MPL-2.0
# This file is part of Chronix2Grid, A python package to generate "en-masse" chronics for loads and productions (thermal, renewable)

-__version__ = "1.2.0.post1"
+__version__ = "1.2.1"
45 changes: 30 additions & 15 deletions chronix2grid/grid2op_utils/add_data.py
@@ -19,20 +19,21 @@ def generate_a_scenario_wrapper(args):
(path_env, name_gen, gen_type, output_dir,
start_date, dt, scen_id, load_seed, renew_seed,
gen_p_forecast_seed, handle_loss, files_to_copy,
-     save_ref_curve, day_lag, tol_zero, debug) = args
+     save_ref_curve, day_lag, tol_zero, debug, load_weekly_pattern) = args
res_gen = generate_a_scenario(path_env,
-                                  name_gen, gen_type,
-                                  output_dir,
-                                  start_date, dt,
-                                  scen_id,
-                                  load_seed, renew_seed,
-                                  gen_p_forecast_seed,
-                                  handle_loss,
-                                  files_to_copy=files_to_copy,
-                                  save_ref_curve=save_ref_curve,
-                                  day_lag=day_lag,
-                                  tol_zero=tol_zero,
-                                  debug=debug)
+                                  name_gen, gen_type,
+                                  output_dir,
+                                  start_date, dt,
+                                  scen_id,
+                                  load_seed, renew_seed,
+                                  gen_p_forecast_seed,
+                                  handle_loss,
+                                  files_to_copy=files_to_copy,
+                                  save_ref_curve=save_ref_curve,
+                                  day_lag=day_lag,
+                                  tol_zero=tol_zero,
+                                  debug=debug,
+                                  load_weekly_pattern=load_weekly_pattern)
return res_gen


@@ -47,7 +48,8 @@ def add_data(env: grid2op.Environment.Environment,
save_ref_curve=False,
day_lag=6, # TODO 6 because it's 2050
debug=False,
-             tol_zero=1e-3
+             tol_zero=1e-3,
+             load_weekly_pattern=None,
):
"""This function adds some data to already existing scenarios.
@@ -68,6 +70,18 @@
with_loss: ``bool``
Do you make sure that the generated data will not be modified too much when running with grid2op (default = True).
Setting it to False will speed up (by quite a lot) the generation process, but will degrade the data quality.
+load_weekly_pattern: pd.DataFrame
+The pattern used as a reference to generate the loads.
+It should be a dataframe with 2 columns: `datetime` and `test`.
+The first column (`datetime`) should contain time stamps (format "%Y-%m-%d %H:%M:%S",
+*eg* `2017-01-07 23:55:00`). The second column (`test`) should contain a number "approximately one"
+which gives the relative ratio of demand for the whole grid at this time stamp.
+We only tested this with data given at 5 minutes resolution (two consecutive rows are
+5 minutes apart) and with the equivalent of 2 years of data. It might work
+(or not) in other cases...
"""
# required parameters
@@ -126,7 +140,8 @@
save_ref_curve,
day_lag,
tol_zero,
-                      debug
+                      debug,
+                      load_weekly_pattern
))
if nb_core == 1:
for args in argss:
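The new `load_weekly_pattern` argument is simply threaded from `add_data` through `generate_a_scenario` down to `generate_loads` (see the other files in this commit). As a rough illustration of the documented format, here is a minimal sketch of building a custom pattern and handing it to the generation entry point used in the new integration test (`env.generate_data`); the date range and the sinusoidal values are purely illustrative, not taken from the repository:

import numpy as np
import pandas as pd

# one value every 5 minutes over roughly two years, as recommended in the docstring above
idx = pd.date_range("2017-01-01 00:00:00", "2018-12-31 23:55:00", freq="5min")
# relative demand "approximately one": a weekly wiggle around 1.0 (2016 five-minute steps per week)
rel_demand = 1.0 + 0.05 * np.sin(2.0 * np.pi * np.arange(len(idx)) / 2016.0)
custom_pattern = pd.DataFrame({
    "datetime": idx.strftime("%Y-%m-%d %H:%M:%S"),  # column 1: time stamps
    "test": rel_demand,                             # column 2: relative grid-wide demand
})

# it can then be passed through the grid2op generation entry point,
# exactly as the new test below does with its reshuffled pattern:
# env.generate_data(load_weekly_pattern=custom_pattern, nb_year=1, seed=0)
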
32 changes: 22 additions & 10 deletions chronix2grid/grid2op_utils/gen_utils.py
@@ -166,8 +166,7 @@ def fix_forecast_ramps(nb_h,

if not has_error[indx_forecasts[0]]:
res_gen_p[indx_forecasts] = 1.0 * gen_p_after_optim[1:,:]
amount_curtailed_for[indx_forecasts] = curt_t.value[1:]

amount_curtailed_for[indx_forecasts] = curt_t.value[1:]
# last value is not used anyway
# res_gen_p[-1, :] = 1.0 * gen_p_after_optim[1,:]
has_error[-nb_h:] = True
@@ -300,7 +299,9 @@
keep_first_dim=True)

# fix the value of the forecast (if above pmax or below pmin for example)
-fun_fix(gen_p_for_this_type, gen_p_this_type, gen_carac_this_type["Pmax"].values)
+fun_fix(gen_p_for_this_type,
+        gen_p_this_type,
+        gen_carac_this_type["Pmax"].values)

# now put everything in the right shape
if res_gen_p_forecasted is None:
@@ -342,19 +343,30 @@ def generate_forecasts_gen(new_forecasts,
res_gen_p_forecasted_df = res_gen_p_forecasted_df.shift(-1)
res_gen_p_forecasted_df.iloc[-1] = 1.0 * res_gen_p_forecasted_df.iloc[-2]
nb_h = 1

# "fix" cases where forecasts are bellow the loads => in that case scale the
# controlable generation to be at least 1% above total demand
loss_before_ramps_for = 1.01
total_gen = res_gen_p_forecasted_df.sum(axis=1)
total_demand = load_p_forecasted.sum(axis=1)
-mask_ko = total_gen <= total_demand
-nb_concerned = (mask_ko).sum()
+mask_not_enough_gen = total_gen <= loss_before_ramps_for * total_demand
+nb_concerned = (mask_not_enough_gen).sum()
tmp = type(env_for_loss).gen_pmax[env_for_loss.gen_redispatchable]
tmp = tmp / tmp.sum()
-rep_factor = np.tile(tmp.reshape(-1,1), nb_concerned).T
-res_gen_p_forecasted_df.loc[mask_ko, type(env_for_loss).gen_redispatchable] *= (1.01 * total_demand - total_gen)[mask_ko].values.reshape(-1,1) * rep_factor

-# and fix the ramps (an optimizer, step by step)
+rep_factor = np.tile(tmp.reshape(-1,1), nb_concerned).T # how to split 1MW on the controllable generators
+est_losses_mw = (loss_before_ramps_for * total_demand - total_gen) # opposite of the loss per step
+# we increase the controllable generation when there is not enough generation (loss positive)
+# res_gen_p_forecasted_df.loc[mask_not_enough_gen, type(env_for_loss).gen_redispatchable] *= est_losses_mw[mask_not_enough_gen].values.reshape(-1,1) * rep_factor
+res_gen_p_forecasted_df.loc[mask_not_enough_gen, type(env_for_loss).gen_redispatchable] += est_losses_mw[mask_not_enough_gen].values.reshape(-1,1) * rep_factor
+# the above increase can lead to generators above pmax; when this is the case,
+# I cut it
+gen_pmax = type(env_for_loss).gen_pmax
+for gen_id, is_disp in enumerate(type(env_for_loss).gen_redispatchable):
+    if not is_disp:
+        continue
+    res_gen_p_forecasted_df.iloc[:, gen_id] = np.minimum(res_gen_p_forecasted_df.iloc[:, gen_id].values,
+                                                         gen_pmax[gen_id])
+# and fix the ramps (for all h) (an optimizer is run t by t)
tmp_ = fix_forecast_ramps(nb_h,
load_p,
load_p_forecasted,
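This hunk is the core of the forecast bug fix: the removed line multiplied the dispatchable forecasts by the energy shortfall (`*=`), while the intent is to add the missing MW, split across dispatchable units in proportion to their Pmax, and then cap each unit at its Pmax. Below is a small self-contained sketch of that corrected logic; it only mirrors the code above, and the toy numbers (3 generators, 2 forecast steps) and variable names are illustrative, not from the repository:

import numpy as np
import pandas as pd

# toy data: 3 generators (the first two dispatchable), 2 forecast steps
gen_pmax = np.array([100.0, 50.0, 30.0])
redispatchable = np.array([True, True, False])
gen_p_for = pd.DataFrame([[40.0, 20.0, 10.0],
                          [90.0, 45.0,  5.0]])           # forecasted production per generator
load_for = pd.DataFrame([[50.0, 30.0],
                         [100.0, 45.0]])                 # forecasted load per consumer

loss_margin = 1.01                                       # keep generation ~1% above total demand
total_gen = gen_p_for.sum(axis=1)
total_demand = load_for.sum(axis=1)
mask_not_enough = total_gen <= loss_margin * total_demand

# how to split 1 MW over the dispatchable units (proportional to Pmax)
share = gen_pmax[redispatchable] / gen_pmax[redispatchable].sum()
shortfall_mw = (loss_margin * total_demand - total_gen)[mask_not_enough].values.reshape(-1, 1)

# the fix: ADD the shortfall (the removed line multiplied by it instead)
disp_cols = np.where(redispatchable)[0]
gen_p_for.loc[mask_not_enough, disp_cols] += shortfall_mw * share

# then cap every dispatchable generator at its Pmax, as the new loop does
for gen_id, is_disp in enumerate(redispatchable):
    if is_disp:
        gen_p_for.iloc[:, gen_id] = np.minimum(gen_p_for.iloc[:, gen_id].values, gen_pmax[gen_id])

In the committed code the shortfall is `est_losses_mw` and the split factor is `rep_factor`; the sketch just reproduces the `+=` plus the `np.minimum` capping on a toy frame.
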
8 changes: 6 additions & 2 deletions chronix2grid/grid2op_utils/loads_utils.py
@@ -128,7 +128,8 @@ def generate_loads(path_env,
number_of_minutes,
generic_params,
load_q_from_p_coeff_default=0.7,
-                   day_lag=6):
+                   day_lag=6,
+                   load_weekly_pattern=None):
"""
This function generates the load for each consumption on a grid
@@ -177,7 +178,10 @@

loads_charac = pd.read_csv(os.path.join(path_env, "loads_charac.csv"), sep=",")
gen_charac = pd.read_csv(os.path.join(path_env, "prods_charac.csv"), sep=",")
-load_weekly_pattern = pd.read_csv(os.path.join(ref_pattern_path, "load_weekly_pattern.csv"), sep=",")
+if load_weekly_pattern is None:
+    load_weekly_pattern = pd.read_csv(os.path.join(ref_pattern_path, "load_weekly_pattern.csv"), sep=",")
+else:
+    load_weekly_pattern = pd.DataFrame(load_weekly_pattern)

if new_forecasts:
load_p, load_p_forecasted, load_ref_curve = generate_new_loads(load_seed,
6 changes: 4 additions & 2 deletions chronix2grid/grid2op_utils/utils.py
@@ -922,7 +922,8 @@ def generate_a_scenario(path_env,
save_ref_curve=False,
day_lag=6, # TODO 6 because it's 2050
tol_zero=1e-3,
-                     debug=True # TODO more feature !
+                     debug=True, # TODO more feature !
+                     load_weekly_pattern=None,
):
"""This function generates and save the data for a scenario.
@@ -987,7 +988,8 @@
dt,
number_of_minutes,
generic_params,
-                              day_lag=day_lag
+                              day_lag=day_lag,
+                              load_weekly_pattern=load_weekly_pattern
)
(new_forecasts, forecasts_params, load_params, loads_charac,
load_p, load_q, load_p_forecasted, load_q_forecasted, load_ref) = tmp_
8 changes: 4 additions & 4 deletions docs/conf.py
@@ -19,12 +19,12 @@
# -- Project information -----------------------------------------------------

project = 'ChroniX2Grid'
-copyright = '2020, Antoine Marot, Mario Jothy, Nicolas Megel'
-author = 'Antoine Marot, Mario Jothy, Nicolas Megel'
+copyright = '2020, Antoine Marot, Mario Jothy, Nicolas Megel, Benjamin Donnot'
+author = 'Antoine Marot, Mario Jothy, Nicolas Megel, Benjamin Donnot'

# The full version, including alpha/beta/rc tags
-release = '1.1.0.post1'
-version = '1.1'
+release = '1.2.1'
+version = '1.2'


# -- General configuration ---------------------------------------------------
2 changes: 1 addition & 1 deletion setup.py
@@ -15,7 +15,7 @@


setup(name='Chronix2Grid',
-      version='1.2.0.post1',
+      version='1.2.1',
description='A python package to generate "en-masse" chronics for loads and productions (thermal, renewable)',
long_description=long_description,
long_description_content_type='text/markdown',
111 changes: 111 additions & 0 deletions tests/integration_tests/test_grid2oputils.py
@@ -0,0 +1,111 @@
# Copyright (c) 2024, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Chronix2Grid, A python package to generate "en-masse" chronics for loads and productions (thermal, renewable)

import copy
import os
import json
import unittest
import warnings
import tempfile
import shutil
import numpy as np
import pandas as pd
import datetime
from packaging import version
import grid2op
from grid2op.Chronics import ChangeNothing

from chronix2grid.getting_started.example.input.generation.patterns import ref_pattern_path


class TestGrid2opUtils(unittest.TestCase):
def setUp(self) -> None:
if version.parse(grid2op.__version__) < version.parse("1.9.8"):
# a fix in grid2Op 1.9.8 : the "loads_charac.csv" was not
# part of the data shipped with the package before
self.skipTest(f"grid2op version too old {grid2op.__version__} < 1.9.8")
return super().setUp()

def test_not_too_high_value_forecasts(self):
seed = 0
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
with grid2op.make("l2rpn_idf_2023", test=True) as env:
path = env.get_path_env()
tmp_dir = tempfile.TemporaryDirectory()
new_env_path = os.path.join(tmp_dir.name, "l2rpn_idf_2023")
shutil.copytree(path, new_env_path)
shutil.rmtree(os.path.join(new_env_path, "chronics"))
# keep only the first date (not to generate everything)
with open(os.path.join(new_env_path, "scenario_params.json"), "r") as f:
scenario_params = json.load(f)
scenario_params["all_dates"] = scenario_params["all_dates"][:1]
with open(os.path.join(new_env_path, "scenario_params.json"), "w") as f:
json.dump(fp=f, obj=scenario_params)
env = grid2op.make(new_env_path,
chronics_class=ChangeNothing,
**grid2op.Opponent.get_kwargs_no_opponent())
env.generate_data(load_weekly_pattern=None, nb_year=1, seed=seed, save_ref_curve=True)
gen_p_for_orig = pd.read_csv(os.path.join(new_env_path,
"chronics",
"2035-01-01_0",
"prod_p_forecasted.csv.bz2"),
sep=";")
assert (gen_p_for_orig.iloc[:,2] <= type(env).gen_pmax[2]).all()
tmp_dir.cleanup()

def test_load_weekly_pattern(self):
seed = 0
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
with grid2op.make("l2rpn_wcci_2022", test=True) as env:
path = env.get_path_env()
tmp_dir = tempfile.TemporaryDirectory()
new_env_path = os.path.join(tmp_dir.name, "l2rpn_wcci_2022")
shutil.copytree(path, new_env_path)
shutil.rmtree(os.path.join(new_env_path, "chronics"))
# keep only the first date (not to generate everything)
with open(os.path.join(new_env_path, "scenario_params.json"), "r") as f:
scenario_params = json.load(f)
scenario_params["all_dates"] = scenario_params["all_dates"][:1]
with open(os.path.join(new_env_path, "scenario_params.json"), "w") as f:
json.dump(fp=f, obj=scenario_params)
env = grid2op.make(new_env_path,
chronics_class=ChangeNothing,
**grid2op.Opponent.get_kwargs_no_opponent())
env.generate_data(load_weekly_pattern=None, nb_year=1, seed=seed, save_ref_curve=True)
load_weekly_pattern = pd.read_csv(os.path.join(ref_pattern_path, "load_weekly_pattern.csv"), sep=",")
load_ref_orig = np.load(os.path.join(new_env_path, "chronics", "2050-01-03_0", "load_ref.npy"))
total_demand_orig = np.sum(load_ref_orig, axis=1)

# change the load weekly pattern
load_weekly_pattern2 = load_weekly_pattern[load_weekly_pattern["datetime"] >= "2018"]
load_weekly_pattern3 = load_weekly_pattern[load_weekly_pattern["datetime"] < "2018"]
load_weekly_pattern_new = pd.concat([load_weekly_pattern2, load_weekly_pattern3])
load_weekly_pattern_new.reset_index(inplace=True)
load_weekly_pattern_new = load_weekly_pattern_new[["datetime", "test"]]
load_weekly_pattern_new["datetime"] = copy.deepcopy(load_weekly_pattern["datetime"])
# delete original data
shutil.rmtree(os.path.join(new_env_path, "chronics"))

# start a new generation
env.generate_data(load_weekly_pattern=load_weekly_pattern_new, nb_year=1, seed=seed, save_ref_curve=True)
load_ref_new = np.load(os.path.join(new_env_path, "chronics", "2050-01-03_0", "load_ref.npy"))
total_demand_new = np.sum(load_ref_new, axis=1)

# recompute ref case to make sure it works
shutil.rmtree(os.path.join(new_env_path, "chronics"))
env.generate_data(load_weekly_pattern=None, nb_year=1, seed=seed, save_ref_curve=True)
load_ref_orig2 = np.load(os.path.join(new_env_path, "chronics", "2050-01-03_0", "load_ref.npy"))
total_demand_orig2 = np.sum(load_ref_orig2, axis=1)

# compare the ref curves and the load data
assert np.allclose(total_demand_orig, total_demand_orig2)
assert not np.allclose(total_demand_orig, total_demand_new)
tmp_dir.cleanup()
