From e5fb0916826f1565c1901baacbc6337cdadc5113 Mon Sep 17 00:00:00 2001
From: "Thom R. Edwards"
Date: Sat, 6 Jul 2024 13:50:34 -0600
Subject: [PATCH] extremely hacky single flow diagram

---
 gtep/driver.py        |  40 ++---
 gtep/gtep_model.py    | 393 ++++++++++++++++++++++++++++--------------
 gtep/gtep_solution.py |  23 +--
 3 files changed, 294 insertions(+), 162 deletions(-)

diff --git a/gtep/driver.py b/gtep/driver.py
index 95c74f1..77b2c6a 100644
--- a/gtep/driver.py
+++ b/gtep/driver.py
@@ -9,29 +9,29 @@
 data_path = "./gtep/data/5bus"
 data_object = ExpansionPlanningData()
 data_object.load_prescient(data_path)
-mod_object = ExpansionPlanningModel(
-    stages=2,
-    data=data_object.md,
-    num_reps=2,
-    len_reps=1,
-    num_commit=24,
-    num_dispatch=12,
-)
-mod_object.create_model()
-TransformationFactory("gdp.bound_pretransformation").apply_to(mod_object.model)
-TransformationFactory("gdp.bigm").apply_to(mod_object.model)
-# opt = SolverFactory("gurobi")
-# opt = Gurobi()
-opt = Highs()
-# mod_object.results = opt.solve(mod_object.model, tee=True)
-mod_object.results = opt.solve(mod_object.model)
-
-
+# mod_object = ExpansionPlanningModel(
+#     stages=2,
+#     data=data_object.md,
+#     num_reps=2,
+#     len_reps=1,
+#     num_commit=24,
+#     num_dispatch=12,
+# )
+# mod_object.create_model()
+# TransformationFactory("gdp.bound_pretransformation").apply_to(mod_object.model)
+# TransformationFactory("gdp.bigm").apply_to(mod_object.model)
+# # opt = SolverFactory("gurobi")
+# # opt = Gurobi()
+# opt = Highs()
+# # mod_object.results = opt.solve(mod_object.model, tee=True)
+# mod_object.results = opt.solve(mod_object.model)
+
+sol_object = ExpansionPlanningSolution()
 sol_object.import_data_object(data_object)
 # sol_object.read_json("./gtep_lots_of_buses_solution.json")  # "./gtep/data/WECC_USAEE"
-sol_object.read_json("./gtep_11bus_solution.json")  # "./gtep/data/WECC_Reduced_USAEE"
-# sol_object.read_json("./gtep_solution.json")
+# sol_object.read_json("./gtep_11bus_solution.json")  # "./gtep/data/WECC_Reduced_USAEE"
+sol_object.read_json("./gtep_solution.json")
 # sol_object.read_json("./updated_gtep_solution_test.json")
 # sol_object.read_json("./gtep_wiggles.json")
 sol_object.plot_levels(save_dir="./plots/")

diff --git a/gtep/gtep_model.py b/gtep/gtep_model.py
index 08f5e4a..5f1cbc1 100644
--- a/gtep/gtep_model.py
+++ b/gtep/gtep_model.py
@@ -15,6 +15,8 @@
 from pyomo.repn.linear import LinearRepnVisitor
 import json
 
+import numpy as np
+
 from math import ceil
 
 # Define what a USD is for pyomo units purposes
@@ -77,10 +79,9 @@ def __init__(
 
     def create_model(self):
        """Create concrete Pyomo model object associated with the ExpansionPlanningModel"""
-
+        self.timer.tic("Creating GTEP Model")
         m = ConcreteModel()
-
        ## TODO: checks for active/built/inactive/unbuilt/etc. gen
        ## NOTE: scale_ModelData_to_pu doesn't account for expansion data -- does it need to?
@@ -200,7 +201,6 @@ def genDisabled(disj, gen):
         def genExtended(disj, gen):
             return
 
-
         @b.Disjunction(m.thermalGenerators)
         def investStatus(disj, gen):
             return [
@@ -210,10 +210,9 @@ def investStatus(disj, gen):
                 disj.genDisabled[gen],
                 disj.genExtended[gen],
             ]
-
-
+
     # JSC inprog - the gen and line investments should be separate disjunctions
     # because the associated variables and constraints we'll disjunct on are
     # different.
    # @b.Disjunction(m.thermalGenerators, m.branches)
    # def investStatus(disj, gen, branch):
    #     disj.branchRetired[branch],
    #     disj.branchDisabled[branch],
    #     disj.branchExtended[branch]
-    #     ]
+    #     ]
 
     # Renewable generator MW values (operational, installed, retired, extended)
     b.renewableOperational = Var(
@@ -259,16 +258,14 @@ def add_investment_constraints(
     ## TODO: Fix var value rather than add constraint
     @b.LogicalConstraint(m.thermalGenerators)
     def thermal_uninvested(b, gen):
-
-
-
+
         if m.md.data["elements"]["generator"][gen]["in_service"] == False:
             # JSC test - fix candidate generator to uninvested
             # return exactly(1, b.genDisabled[gen].indicator_var)
             return exactly(0, b.genOperational[gen].indicator_var)
         else:
             return LogicalConstraint.Skip
-
+
     # JSC inprog
     # ## TODO: Fix var value rather than add constraint
     # @b.LogicalConstraint(m.branches)
@@ -404,8 +401,7 @@ def investment_cost(b):
             * b.renewableExtended[gen]
             for gen in m.renewableGenerators
         )
-
-        #JSC inprog - add branch investment costs here
+        # JSC inprog - add branch investment costs here
     )
 
     # Curtailment penalties for investment period
@@ -493,22 +489,21 @@ def generatorCost(b, gen):
 
     # Define bounds on transmission line capacity
     def power_flow_limits(b, transmissionLine):
-
+
         return (
-            -m.transmissionCapacity[transmissionLine],
-            m.transmissionCapacity[transmissionLine],
-        )
-
-    # JSC inprog - will swap to branchInUse once Transmission Switching is
+            -m.transmissionCapacity[transmissionLine],
+            m.transmissionCapacity[transmissionLine],
+        )
+
+    # JSC inprog - will swap to branchInUse once Transmission Switching is
     # implemented
     # if i_p.branchDisabled | i_p.branchRetired:
     #     return (0,0)
-    # else:
+    # else:
     #     return (
     #         -m.transmissionCapacity[transmissionLine],
     #         m.transmissionCapacity[transmissionLine],
     #     )
-
     # NOTE: this is an abuse of units and needs to be fixed for variable temporal resolution
     b.powerFlow = Var(
@@ -589,13 +584,12 @@ def delta_bus_angle_bounds(b, line):
     b.renewableCurtailmentDispatch = sum(
         b.renewableCurtailment[gen] for gen in m.renewableGenerators
     )
-
-
-    #disj.branchOperational[branch],
+
+    # disj.branchOperational[branch],
     # Branches cannot be used unless they are operational or just installed.
-    # Maybe we want to go ahead and set up the disjunct for transmission switching
-    # because we'll have to delineate between lines installed but turned off and
+    # Maybe we want to go ahead and set up the disjunct for transmission switching
+    # because we'll have to delineate between lines installed but turned off and
     # lines not installed or retired.
     # @b.LogicalConstraint(m.branches)
     # def use_active_branches_only(b, branch):
     #     return (
     #     )
 
 
-def add_dispatch_constraints(
-    b,
-    disp_per
-):
+def add_dispatch_constraints(b, disp_per):
     """Add dispatch-associated inequalities to representative period block."""
     m = b.model()
     c_p = b.parent_block()
     r_p = c_p.parent_block()
     i_p = r_p.parent_block()
-    import numpy as np
 
     # JSC: how do we actually fix the seed as a sequence over all dispatch periods?
     # Fixing a seed within the add_dispatch_constraints fxn doesn't work - it
     # repeats the load values for each period, which seems to always lead to
     # all generators always on (???)
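# One possible answer to the seeding question above (a sketch, not part of the
# patch): build a single numpy SeedSequence up front and spawn one independent
# child generator per dispatch period, so each period draws from its own
# reproducible stream instead of repeating values. The seed value and
# `num_dispatch` here are hypothetical stand-ins for whatever the model provides.
import numpy as np

def make_period_rngs(seed, num_dispatch):
    # spawn() yields statistically independent child seed sequences
    children = np.random.SeedSequence(seed).spawn(num_dispatch)
    return [np.random.default_rng(child) for child in children]

# usage sketch: rng = make_period_rngs(42, num_dispatch)[disp_per - 1]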
-
-    rng = np.random.default()
-
+    rng = np.random.default_rng()
+
     for key in m.loads.keys():
-        m.loads[key] *= max(0,rng.normal(1.0, 0.5))
+        m.loads[key] *= max(0, rng.normal(1.0, 0.5))
 
     # JSC question: Will forcing uninvested lines to have no power flow (via
     # the flow limits) cause infeasibility? Driving idea is: do we need to
     # instead/additionally adjust the dc_power_flow and flow_balance
     # constraints based on the disjunction for line investment? Intuitively,
     # no, it should function identically as if the line didn't exist, but idk
     # if there's an interplay with the bus angle equation that could cause
     # issues. Maybe the thing to do is to make a disjunction here on the lines
@@ -992,22 +981,23 @@ def commitment_period_rule(b, commitment_period):
     if m.data_list:
         m.md = m.data_list[i_p.representativePeriods.index(r_p.currentPeriod)]
 
-    # JSC update - had to make an exception for cases where gens were candidates
-    # bc their time series reduced to single values. Will probably need to fix
+    # JSC update - had to make an exception for cases where gens were candidates
+    # because their time series reduced to single values. Will probably need to fix
     # this and look at where that reduction is taking place because we need more
-    # than a single value if the generator is built. (Probably? Maybe there's a
-    # different way to handle candidate renewable data because this assumes
+    # than a single value if the generator is built. (Probably? Maybe there's a
+    # different way to handle candidate renewable data because this assumes
     # knowledge of the future outputs of a candidate... could be captured by scenarios?)
     # Maximum output of each renewable generator
     m.renewableCapacity = {
-        renewableGen: 0
-        if type(m.md.data["elements"]["generator"][renewableGen]["p_max"])==float
-        else m.md.data["elements"]["generator"][renewableGen]["p_max"][
-            "values"
-        ][commitment_period - 1]
+        renewableGen: (
+            0
+            if type(m.md.data["elements"]["generator"][renewableGen]["p_max"]) == float
+            else m.md.data["elements"]["generator"][renewableGen]["p_max"]["values"][
+                commitment_period - 1
+            ]
+        )
         for renewableGen in m.renewableGenerators
     }
-
     ## TODO: Redesign load scaling and allow nature of it as argument
 
     # Demand at each bus
@@ -1061,32 +1051,56 @@ def add_representative_period_constraints(b, rep_per):
     m = b.model()
     i_p = b.parent_block()
 
-    # JSC update: Done(?)
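# Illustrative arithmetic for the window length used in the constraints below
# (hypothetical rate): a unit with ramp_down_rate = 0.3, read here as the
# fraction of capacity shed per commitment period, needs ceil(1 / 0.3) = 4
# periods to ramp fully down, so its shutdown indicator must persist for 4
# consecutive commitment periods.
from math import ceil

assert ceil(1 / 0.3) == 4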
    @b.LogicalConstraint(b.commitmentPeriods, m.thermalGenerators)
     def consistent_commitment_shutdown(b, commitmentPeriod, thermalGen):
-        req_shutdown_periods = ceil(1/float(m.md.data["elements"]["generator"][thermalGen]["ramp_down_rate"]))
+        req_shutdown_periods = ceil(
+            1 / float(m.md.data["elements"]["generator"][thermalGen]["ramp_down_rate"])
+        )
         return (
-            atmost(req_shutdown_periods-1,
-                [b.commitmentPeriod[commitmentPeriod-j-1].genShutdown[thermalGen].indicator_var for j in
-                range(min(req_shutdown_periods,commitmentPeriod-1))])
-            .land(b.commitmentPeriod[commitmentPeriod-1].genShutdown[thermalGen].indicator_var)
-            #| b.commitmentPeriod[commitmentPeriod-1].genOn.indicator_var)
+            atmost(
+                req_shutdown_periods - 1,
+                [
+                    b.commitmentPeriod[commitmentPeriod - j - 1]
+                    .genShutdown[thermalGen]
+                    .indicator_var
+                    for j in range(min(req_shutdown_periods, commitmentPeriod - 1))
+                ],
+            ).land(
+                b.commitmentPeriod[commitmentPeriod - 1]
+                .genShutdown[thermalGen]
+                .indicator_var
+            )
+            # | b.commitmentPeriod[commitmentPeriod-1].genOn.indicator_var)
             .implies(
-                b.commitmentPeriod[commitmentPeriod].genShutdown[thermalGen].indicator_var
+                b.commitmentPeriod[commitmentPeriod]
+                .genShutdown[thermalGen]
+                .indicator_var
             )
             if commitmentPeriod != 1
             else LogicalConstraint.Skip
         )
-
+
     @b.LogicalConstraint(b.commitmentPeriods, m.thermalGenerators)
     def consistent_commitment_off_after_shutdown(b, commitmentPeriod, thermalGen):
-        req_shutdown_periods = ceil(1/float(m.md.data["elements"]["generator"][thermalGen]["ramp_down_rate"]))
+        req_shutdown_periods = ceil(
+            1 / float(m.md.data["elements"]["generator"][thermalGen]["ramp_down_rate"])
+        )
         return (
-            atleast(req_shutdown_periods,
-                [b.commitmentPeriod[commitmentPeriod-j-1].genShutdown[thermalGen].indicator_var for j in
-                range(min(req_shutdown_periods,commitmentPeriod-1))])
-            .land(b.commitmentPeriod[commitmentPeriod-1].genShutdown[thermalGen].indicator_var)
+            atleast(
+                req_shutdown_periods,
+                [
+                    b.commitmentPeriod[commitmentPeriod - j - 1]
+                    .genShutdown[thermalGen]
+                    .indicator_var
+                    for j in range(min(req_shutdown_periods, commitmentPeriod - 1))
+                ],
+            )
+            .land(
+                b.commitmentPeriod[commitmentPeriod - 1]
+                .genShutdown[thermalGen]
+                .indicator_var
+            )
             .implies(
                 b.commitmentPeriod[commitmentPeriod].genOff[thermalGen].indicator_var
             )
@@ -1094,32 +1108,56 @@ def consistent_commitment_off_after_shutdown(b, commitmentPeriod, thermalGen):
             if commitmentPeriod != 1
             else LogicalConstraint.Skip
         )
 
-    # JSC update: Done(?)
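# Plain-Python restatement of the atmost/atleast pair above (illustrative
# sketch only; `history` is a hypothetical list of genShutdown booleans with
# the most recent period first, and n = req_shutdown_periods):
def shutdown_successor_state(history, n):
    window = history[:n]
    if history[0] and sum(window) <= n - 1:
        return "genShutdown"  # window incomplete: keep shutting down
    if history[0] and sum(window) >= n:
        return "genOff"  # window complete: land in the off state
    return "unconstrained"  # the two constraints above impose nothing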
    @b.LogicalConstraint(b.commitmentPeriods, m.thermalGenerators)
     def consistent_commitment_startup(b, commitmentPeriod, thermalGen):
-        req_startup_periods = ceil(1/float(m.md.data["elements"]["generator"][thermalGen]["ramp_up_rate"]))
+        req_startup_periods = ceil(
+            1 / float(m.md.data["elements"]["generator"][thermalGen]["ramp_up_rate"])
+        )
         return (
-            atmost(req_startup_periods-1,
-                [b.commitmentPeriod[commitmentPeriod-j-1].genStartup[thermalGen].indicator_var for j in
-                range(min(req_startup_periods,commitmentPeriod-1))])
-            .land(b.commitmentPeriod[commitmentPeriod-1].genStartup[thermalGen].indicator_var)
-            #| b.commitmentPeriod[commitmentPeriod-1].genOn.indicator_var)
+            atmost(
+                req_startup_periods - 1,
+                [
+                    b.commitmentPeriod[commitmentPeriod - j - 1]
+                    .genStartup[thermalGen]
+                    .indicator_var
+                    for j in range(min(req_startup_periods, commitmentPeriod - 1))
+                ],
+            ).land(
+                b.commitmentPeriod[commitmentPeriod - 1]
+                .genStartup[thermalGen]
+                .indicator_var
+            )
+            # | b.commitmentPeriod[commitmentPeriod-1].genOn.indicator_var)
             .implies(
-                b.commitmentPeriod[commitmentPeriod].genStartup[thermalGen].indicator_var
+                b.commitmentPeriod[commitmentPeriod]
+                .genStartup[thermalGen]
+                .indicator_var
             )
             if commitmentPeriod != 1
             else LogicalConstraint.Skip
         )
-
+
     @b.LogicalConstraint(b.commitmentPeriods, m.thermalGenerators)
     def consistent_commitment_on_after_startup(b, commitmentPeriod, thermalGen):
-        req_startup_periods = ceil(1/float(m.md.data["elements"]["generator"][thermalGen]["ramp_up_rate"]))
+        req_startup_periods = ceil(
+            1 / float(m.md.data["elements"]["generator"][thermalGen]["ramp_up_rate"])
+        )
         return (
-            atleast(req_startup_periods,
-                [b.commitmentPeriod[commitmentPeriod-j-1].genStartup[thermalGen].indicator_var for j in
-                range(min(req_startup_periods,commitmentPeriod-1))])
-            .land(b.commitmentPeriod[commitmentPeriod-1].genStartup[thermalGen].indicator_var)
+            atleast(
+                req_startup_periods,
+                [
+                    b.commitmentPeriod[commitmentPeriod - j - 1]
+                    .genStartup[thermalGen]
+                    .indicator_var
+                    for j in range(min(req_startup_periods, commitmentPeriod - 1))
+                ],
+            )
+            .land(
+                b.commitmentPeriod[commitmentPeriod - 1]
+                .genStartup[thermalGen]
+                .indicator_var
+            )
             .implies(
                 b.commitmentPeriod[commitmentPeriod].genOn[thermalGen].indicator_var
             )
             if commitmentPeriod != 1
@@ -1128,71 +1166,158 @@ def consistent_commitment_on_after_startup(b, commitmentPeriod, thermalGen):
             else LogicalConstraint.Skip
         )
 
     # JSC update: Done(?)
-    @b.LogicalConstraint(b.commitmentPeriods,m.thermalGenerators)
+    @b.LogicalConstraint(b.commitmentPeriods, m.thermalGenerators)
     def consistent_commitment_uptime(b, commitmentPeriod, thermalGen):
         return (
-            atmost(int(m.md.data["elements"]["generator"][thermalGen]["min_up_time"])-1,
-                [b.commitmentPeriod[commitmentPeriod-j-1].genOn[thermalGen].indicator_var for j in
-                range(min(int(m.md.data["elements"]["generator"][thermalGen]["min_up_time"]),commitmentPeriod-1))])
-            .land(b.commitmentPeriod[commitmentPeriod-1].genOn[thermalGen].indicator_var).implies(
-                b.commitmentPeriod[commitmentPeriod].genOn[thermalGen].indicator_var
-            )
-            if commitmentPeriod != 1 #int(m.md.data["elements"]["generator"][thermalGen]["min_up_time"])+1
-            else LogicalConstraint.Skip
+            atmost(
+                int(m.md.data["elements"]["generator"][thermalGen]["min_up_time"]) - 1,
+                [
+                    b.commitmentPeriod[commitmentPeriod - j - 1]
+                    .genOn[thermalGen]
+                    .indicator_var
+                    for j in range(
+                        min(
+                            int(
+                                m.md.data["elements"]["generator"][thermalGen][
+                                    "min_up_time"
+                                ]
+                            ),
+                            commitmentPeriod - 1,
+                        )
+                    )
+                ],
+            )
+            .land(
+                b.commitmentPeriod[commitmentPeriod - 1].genOn[thermalGen].indicator_var
+            )
+            .implies(
+                b.commitmentPeriod[commitmentPeriod].genOn[thermalGen].indicator_var
+            )
+            if commitmentPeriod
+            != 1  # int(m.md.data["elements"]["generator"][thermalGen]["min_up_time"])+1
+            else LogicalConstraint.Skip
         )
-
-    @b.LogicalConstraint(b.commitmentPeriods,m.thermalGenerators)
+
+    @b.LogicalConstraint(b.commitmentPeriods, m.thermalGenerators)
     def consistent_commitment_shutdown_after_uptime(b, commitmentPeriod, thermalGen):
         return (
-            (atleast(int(m.md.data["elements"]["generator"][thermalGen]["min_up_time"]),
-                [b.commitmentPeriod[commitmentPeriod-j-1].genOn[thermalGen].indicator_var for j in
-                range(min(int(m.md.data["elements"]["generator"][thermalGen]["min_up_time"]),commitmentPeriod-1))])
-            .land(b.commitmentPeriod[commitmentPeriod-1].genOn[thermalGen].indicator_var)).implies(
-                b.commitmentPeriod[commitmentPeriod].genOn[thermalGen].indicator_var
-                | b.commitmentPeriod[commitmentPeriod].genShutdown[thermalGen].indicator_var
+            (
+                atleast(
+                    int(m.md.data["elements"]["generator"][thermalGen]["min_up_time"]),
+                    [
+                        b.commitmentPeriod[commitmentPeriod - j - 1]
+                        .genOn[thermalGen]
+                        .indicator_var
+                        for j in range(
+                            min(
+                                int(
+                                    m.md.data["elements"]["generator"][thermalGen][
+                                        "min_up_time"
+                                    ]
+                                ),
+                                commitmentPeriod - 1,
+                            )
+                        )
+                    ],
+                ).land(
+                    b.commitmentPeriod[commitmentPeriod - 1]
+                    .genOn[thermalGen]
+                    .indicator_var
+                )
+            ).implies(
+                b.commitmentPeriod[commitmentPeriod].genOn[thermalGen].indicator_var
+                | b.commitmentPeriod[commitmentPeriod]
+                .genShutdown[thermalGen]
+                .indicator_var
             )
             if commitmentPeriod != 1
             else LogicalConstraint.Skip
         )
 
     # JSC update: Done(?)
-    # If at most n-1 of the previous (not counting current) n periods were down,
+    # If at most n-1 of the previous (not counting current) n periods were down,
     # and the previous period was down, then the current period must also be down.
     # (n is the minimum downtime)
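# Worked example of the comment above, for the constraint below (hypothetical
# min_down_time n = 3), writing Off_t for genOff in period t:
#   last three periods Off = [Off_{t-1}=True, Off_{t-2}=True, Off_{t-3}=False]:
#   only 2 of the last 3 periods were down, so atmost(2, ...) holds, and the
#   previous period was down => Off_t is forced (only 2 < 3 periods served).
#   last three periods Off = [True, True, True]: atleast(3, ...) holds instead,
#   so the minimum downtime is served and the unit may stay off or start up
#   (see consistent_commitment_start_after_downtime).
def must_stay_down(off_history, n):
    # off_history: genOff booleans, most recent period first (illustrative)
    return off_history[0] and sum(off_history[:n]) <= n - 1

assert must_stay_down([True, True, False], 3) is True
assert must_stay_down([True, True, True], 3) is False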
-    @b.LogicalConstraint(b.commitmentPeriods,m.thermalGenerators)
+    @b.LogicalConstraint(b.commitmentPeriods, m.thermalGenerators)
     def consistent_commitment_downtime(b, commitmentPeriod, thermalGen):
         return (
-            (atmost(int(m.md.data["elements"]["generator"][thermalGen]["min_down_time"])-1,
-                [b.commitmentPeriod[commitmentPeriod-j-1].genOff[thermalGen].indicator_var for j in
-                range(min(int(m.md.data["elements"]["generator"][thermalGen]["min_down_time"]),commitmentPeriod-1))])
-            .land(b.commitmentPeriod[commitmentPeriod-1].genOff[thermalGen].indicator_var)).implies(
-                b.commitmentPeriod[commitmentPeriod].genOff[thermalGen].indicator_var
-            )
-            if commitmentPeriod != 1 #>= int(m.md.data["elements"]["generator"][thermalGen]["min_down_time"])+1
-            else LogicalConstraint.Skip
+            (
+                atmost(
+                    int(m.md.data["elements"]["generator"][thermalGen]["min_down_time"])
+                    - 1,
+                    [
+                        b.commitmentPeriod[commitmentPeriod - j - 1]
+                        .genOff[thermalGen]
+                        .indicator_var
+                        for j in range(
+                            min(
+                                int(
+                                    m.md.data["elements"]["generator"][thermalGen][
+                                        "min_down_time"
+                                    ]
+                                ),
+                                commitmentPeriod - 1,
+                            )
+                        )
+                    ],
+                ).land(
+                    b.commitmentPeriod[commitmentPeriod - 1]
+                    .genOff[thermalGen]
+                    .indicator_var
+                )
+            ).implies(
+                b.commitmentPeriod[commitmentPeriod].genOff[thermalGen].indicator_var
+            )
+            if commitmentPeriod
+            != 1  # >= int(m.md.data["elements"]["generator"][thermalGen]["min_down_time"])+1
+            else LogicalConstraint.Skip
         )
-
-    #b.consistent_commitment_downtime.pprint()
-    #quit()
-
+
+    # b.consistent_commitment_downtime.pprint()
+    # quit()
+
     # JSC update: Done(?)
     # Trying to make this and the previous constraint cover all logical possibilities for downtime... together they
     # should make the original consistent_commitment_inactivity constraint redundant. We don't want just any behavior
    # in the first n periods though - it shouldn't be able to violate the min_downtime constraint
     @b.LogicalConstraint(b.commitmentPeriods, m.thermalGenerators)
     def consistent_commitment_start_after_downtime(b, commitmentPeriod, thermalGen):
         return (
-            (atleast(int(m.md.data["elements"]["generator"][thermalGen]["min_down_time"]),
-                [b.commitmentPeriod[commitmentPeriod-j-1].genOff[thermalGen].indicator_var for j in
-                range(min(int(m.md.data["elements"]["generator"][thermalGen]["min_down_time"]),commitmentPeriod-1))])
-            .land(b.commitmentPeriod[commitmentPeriod-1].genOff[thermalGen].indicator_var)).implies(
-                b.commitmentPeriod[commitmentPeriod].genOff[thermalGen].indicator_var
-                | b.commitmentPeriod[commitmentPeriod].genStartup[thermalGen].indicator_var
+            (
+                atleast(
+                    int(
+                        m.md.data["elements"]["generator"][thermalGen]["min_down_time"]
+                    ),
+                    [
+                        b.commitmentPeriod[commitmentPeriod - j - 1]
+                        .genOff[thermalGen]
+                        .indicator_var
+                        for j in range(
+                            min(
+                                int(
+                                    m.md.data["elements"]["generator"][thermalGen][
+                                        "min_down_time"
+                                    ]
+                                ),
+                                commitmentPeriod - 1,
+                            )
+                        )
+                    ],
+                ).land(
+                    b.commitmentPeriod[commitmentPeriod - 1]
+                    .genOff[thermalGen]
+                    .indicator_var
+                )
+            ).implies(
+                b.commitmentPeriod[commitmentPeriod].genOff[thermalGen].indicator_var
+                | b.commitmentPeriod[commitmentPeriod]
+                .genStartup[thermalGen]
+                .indicator_var
             )
             if commitmentPeriod != 1
             else LogicalConstraint.Skip
         )
-
+
     # @b.LogicalConstraint(b.commitmentPeriods, m.thermalGenerators)
     # def consistent_commitment_inactivity(b, commitmentPeriod, thermalGen):
     #     return (
@@ -1229,8 +1354,9 @@ def representative_period_rule(
     add_representative_period_variables(b, representative_period)
     add_representative_period_constraints(b, representative_period)
 
-    #b.consistent_commitment_downtime.pprint()
-    #quit()
+    # b.consistent_commitment_downtime.pprint()
+    # quit()
+
 
 def investment_stage_rule(
     b,
@@ -1397,11 +1523,13 @@ def model_data_references(m):
     # JSC update: Done(?)
     # Maximum output of each renewable generator
     m.renewableCapacity = {
-        renewableGen: 0
-        if type(m.md.data["elements"]["generator"][renewableGen]["p_max"])==float
-        else max(
-            m.md.data["elements"]["generator"][renewableGen]["p_max"]["values"]
-        )
+        renewableGen: (
+            0
+            if type(m.md.data["elements"]["generator"][renewableGen]["p_max"]) == float
+            else max(
+                m.md.data["elements"]["generator"][renewableGen]["p_max"]["values"]
+            )
+        )
         for renewableGen in m.renewableGenerators
     }
 
     # Amount of renewable generation
     # that can be reliably counted toward planning reserve requirement
     # TODO: WHAT HAVE I DONE HERE I HATE IT and JSC made it worse...
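# Illustrative check of the capacity-value formula below (hypothetical
# numbers): with an hourly p_max series of [10, 40, 50] MW,
# renewableCapacity = max = 50 and renewableCapacityValue = min / capacity
# = 10 / 50 = 0.2, i.e. 20% of nameplate is treated as firm toward the
# reserve requirement. The max(1, ...) guard protects against division by
# zero for candidate units whose capacity reduces to 0.
p_max_values = [10.0, 40.0, 50.0]
capacity = max(p_max_values)
capacity_value = min(p_max_values) / max(1, capacity)
assert capacity_value == 0.2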
    m.renewableCapacityValue = {
-        renewableGen: 0
-        if type(m.md.data["elements"]["generator"][renewableGen]["p_max"])==float
-        else
-        min(
-            m.md.data["elements"]["generator"][renewableGen]["p_max"]["values"]
+        renewableGen: (
+            0
+            if type(m.md.data["elements"]["generator"][renewableGen]["p_max"]) == float
+            else min(
+                m.md.data["elements"]["generator"][renewableGen]["p_max"]["values"]
+            )
+            / max(1, m.renewableCapacity[renewableGen])
         )
-        / max(1, m.renewableCapacity[renewableGen])
         for renewableGen in m.renewableGenerators
     }

diff --git a/gtep/gtep_solution.py b/gtep/gtep_solution.py
index 0f31342..0373eb1 100644
--- a/gtep/gtep_solution.py
+++ b/gtep/gtep_solution.py
@@ -708,7 +708,7 @@ def _plot_graph_workhorse(self,
                           df,
                           value_key,
                           parent_key_string,
-                          what_is_a_bus_called='dc_branch',
+                          what_is_a_bus_called='branch', #'dc_branch',
                           units=None,
                           pretty_title="Selected Data",
                           save_dir=".",):
@@ -869,14 +869,15 @@ def generate_flow_glyphs(num_glyphs,
         bot_flow_glyphs = generate_flow_glyphs(len(weights_bot), glyph_type=glyph_type, glyph_rotation=(np.pi))  # for custom
         bot_flow_glyphs = reversed(bot_flow_glyphs)
         bot_facecolors = cmap(norm(weights_bot))
-        bot_flow_collection = PatchCollection(bot_flow_glyphs, facecolors=bot_facecolors, edgecolors='grey', alpha=0.5)
+        # bot_flow_collection = PatchCollection(bot_flow_glyphs, facecolors=bot_facecolors, edgecolors='grey', alpha=0.5)  # [HACK]
 
         # scale and move top and bottom collections
-        top_base_transform = Affine2D().scale(sx=1, sy=0.9) + Affine2D().translate(0, 0.5)  # + ax_graph.transData
+        # top_base_transform = Affine2D().scale(sx=1, sy=0.9) + Affine2D().translate(0, 0.5)  # + ax_graph.transData  # [HACK]
+        top_base_transform = Affine2D().scale(sx=1, sy=1.0) + Affine2D().translate(0, 0.0)  # + ax_graph.transData
         top_flow_collection.set_transform(top_base_transform)
         bot_base_transform = Affine2D().scale(sx=1, sy=0.9) + Affine2D().translate(0, -0.5)  # + ax_graph.transData
         # bot_base_transform = Affine2D().scale(sx=1, sy=0.9) + Affine2D().translate(0, -0.5) + ax_graph.transData
-        bot_flow_collection.set_transform(bot_base_transform)
+        # bot_flow_collection.set_transform(bot_base_transform)  # [HACK]
 
         # combine collections and move to edge between nodes
@@ -888,8 +889,8 @@ def generate_flow_glyphs(num_glyphs,
         node_distance = np.linalg.norm(end_pos-start_pos)
         rot_angle_rad = np.arctan2((end_pos[1]-start_pos[1]),(end_pos[0]-start_pos[0]))
 
-        along_edge_scale = 0.5
-        away_from_edge_scale = 0.05
+        along_edge_scale = 0.4
+        away_from_edge_scale = 0.1
         # set up transformations
         # stretch to the distance between target nodes
         length_transform = Affine2D().scale(sx=node_distance*along_edge_scale, sy=1)
@@ -903,11 +904,11 @@ def generate_flow_glyphs(num_glyphs,
         t2 = length_transform + scale_transform + rot_transform + translate_transform + ax_graph.transData
 
         top_flow_collection.set_transform(top_flow_collection.get_transform() + t2)
-        bot_flow_collection.set_transform(bot_flow_collection.get_transform() + t2)
+        # bot_flow_collection.set_transform(bot_flow_collection.get_transform() + t2)  # [HACK]
 
         # add collection
         ax_graph.add_collection(top_flow_collection)
-        ax_graph.add_collection(bot_flow_collection)
+        # ax_graph.add_collection(bot_flow_collection)  # [HACK]
 
         # add edges
         # define edge colorbar
@@ -922,8 +923,10 @@ def generate_flow_glyphs(num_glyphs,
             end_key = self.data.data['elements'][what_is_a_bus_called][item]['to_bus']
             start_pos = graph_node_position_dict[start_key]
             end_pos = graph_node_position_dict[end_key]
f"branch_{start_key}_{end_key}_{value_key}_value" - alt_edge_key = f"branch_{end_key}_{start_key}_{value_key}_value" + # edge_key = f"branch_{start_key}_{end_key}_{value_key}_value" + # alt_edge_key = f"branch_{end_key}_{start_key}_{value_key}_value" + edge_key = f"{item}_{value_key}_value" + alt_edge_key = f"{item}_{value_key}_value" # kind = 'triangle' # kind = 'rectangle'