Add separate_kwargs_with_semicolon = true to .JuliaFormatter.toml (#764)
odow authored Jul 25, 2024
1 parent 405d527 commit f5d24b7
Showing 69 changed files with 337 additions and 327 deletions.
1 change: 1 addition & 0 deletions .JuliaFormatter.toml
@@ -5,4 +5,5 @@ always_for_in = true
 always_use_return = true
 margin = 80
 remove_extra_newlines = true
+separate_kwargs_with_semicolon = true
 short_to_long_function_def = true
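
For reference, `separate_kwargs_with_semicolon = true` tells JuliaFormatter to insert a `;` between the positional and keyword arguments of every function call, which drives all of the mechanical edits below. A minimal before/after sketch (`f` and `g` are hypothetical stand-ins for the SDDP.jl calls in this diff, not part of any package):

    # Hypothetical stand-ins for the SDDP.jl calls in this diff.
    f(model; iteration_limit = 10) = (model, iteration_limit)
    g(; stages = 1, lower_bound = 0.0) = (stages, lower_bound)

    f("m", iteration_limit = 100)   # before formatting: kwargs follow a comma
    f("m"; iteration_limit = 100)   # after: `;` marks the keyword boundary
    g(; stages = 5)                 # kwargs-only calls gain a leading `;`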
4 changes: 2 additions & 2 deletions benchmarks/benchmark.jl
@@ -106,13 +106,13 @@ function report(io::IO, filename_A::String, filename_B::String)
     return
 end

-filename_A = benchmark(
+filename_A = benchmark(;
     time_limit = 60,
     stopping_rules = [SDDP.BoundStalling(10, 1e-6)],
     duality_handler = SDDP.ContinuousConicDuality(),
 )

-filename_B = benchmark(
+filename_B = benchmark(;
     time_limit = 60,
     stopping_rules = [SDDP.BoundStalling(10, 1e-6)],
     duality_handler = SDDP.LagrangianDuality(),
9 changes: 6 additions & 3 deletions docs/make.jl
@@ -125,10 +125,10 @@ open(joinpath(@__DIR__, "src", "changelog.md"), "r") do in_io
     end
 end

-Documenter.makedocs(
+Documenter.makedocs(;
     sitename = "SDDP.jl",
     authors = "Oscar Dowson",
-    format = Documenter.HTML(
+    format = Documenter.HTML(;
         analytics = "G-HZQQDVMPZW",
         # See https://github.com/JuliaDocs/Documenter.jl/issues/868
         prettyurls = get(ENV, "CI", nothing) == "true",
@@ -180,4 +180,7 @@ Documenter.makedocs(
     doctestfilters = [r"[\s\-]?\d\.\d{6}e[\+\-]\d{2}"],
 )

-Documenter.deploydocs(repo = "github.com/odow/SDDP.jl.git", push_preview = true)
+Documenter.deploydocs(;
+    repo = "github.com/odow/SDDP.jl.git",
+    push_preview = true,
+)
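
Note that `deploydocs` was also reflowed onto multiple lines: the original one-line call sits right at the `margin = 80` setting in `.JuliaFormatter.toml`, so adding the semicolon appears to push it over the margin and the formatter breaks it into one keyword argument per line.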
4 changes: 2 additions & 2 deletions docs/src/examples/FAST_production_management.jl
@@ -38,5 +38,5 @@ function fast_production_management(; cut_type)
     @test SDDP.calculate_bound(model) ≈ -23.96 atol = 1e-2
 end

-fast_production_management(cut_type = SDDP.SINGLE_CUT)
-fast_production_management(cut_type = SDDP.MULTI_CUT)
+fast_production_management(; cut_type = SDDP.SINGLE_CUT)
+fast_production_management(; cut_type = SDDP.MULTI_CUT)
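
The same rewrite repeats throughout this commit, and it is purely cosmetic at these call sites: Julia parses `name = value` pairs after the positional arguments as keyword arguments with or without a semicolon, so both spellings invoke the same method. A minimal sketch of the equivalence (`solve_stage` is a hypothetical function, not part of SDDP.jl):

    # One positional and one keyword argument.
    solve_stage(x; time_limit = 60) = (x, time_limit)

    # Both calls parse identically; `time_limit` is a keyword argument either way.
    @assert solve_stage(1, time_limit = 30) == (1, 30)
    @assert solve_stage(1; time_limit = 30) == (1, 30)

    # The semicolon is required, however, when splatting keyword arguments:
    kw = (time_limit = 30,)
    @assert solve_stage(1; kw...) == (1, 30)

The explicit semicolon is therefore a readability convention: it makes the positional/keyword boundary visible at the call site.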
2 changes: 1 addition & 1 deletion docs/src/examples/FAST_quickstart.jl
@@ -11,7 +11,7 @@ using SDDP, HiGHS, Test

 function fast_quickstart()
     model = SDDP.PolicyGraph(
-        SDDP.LinearGraph(2),
+        SDDP.LinearGraph(2);
         lower_bound = -5,
         optimizer = HiGHS.Optimizer,
     ) do sp, t
8 changes: 4 additions & 4 deletions docs/src/examples/Hydro_thermal.jl
@@ -66,7 +66,7 @@ graph = SDDP.UnicyclicGraph(0.95; num_nodes = 3)
 # probability mass vector can also be provided.

 model = SDDP.PolicyGraph(
-    graph,
+    graph;
     sense = :Min,
     lower_bound = 0.0,
     optimizer = HiGHS.Optimizer,
@@ -90,14 +90,14 @@ end
 # achieved using [`SDDP.train`](@ref). There are many options that can be passed, but
 # `iteration_limit` terminates the training after the prescribed number of SDDP iterations.

-SDDP.train(model, iteration_limit = 100)
+SDDP.train(model; iteration_limit = 100)

 # ## Simulating the policy

 # After training, we can simulate the policy using [`SDDP.simulate`](@ref).

 sims = SDDP.simulate(model, 100, [:g_t])
-mu = round(mean([s[1][:g_t] for s in sims]), digits = 2)
+mu = round(mean([s[1][:g_t] for s in sims]); digits = 2)
 println("On average, $(mu) units of thermal are used in the first stage.")

 # ## Extracting the water values
@@ -108,4 +108,4 @@ println("On average, $(mu) units of thermal are used in the first stage.")
 # decrease in the expected long-run cost.

 V = SDDP.ValueFunction(model[1])
-cost, price = SDDP.evaluate(V, x = 10)
+cost, price = SDDP.evaluate(V; x = 10)
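
The rewrite applies to Base and standard-library calls too, as in the `round(...; digits = 2)` line above; the two forms are interchangeable:

    # Both forms round to two digits; the semicolon is purely stylistic here.
    @assert round(3.14159; digits = 2) == round(3.14159, digits = 2) == 3.14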
2 changes: 1 addition & 1 deletion docs/src/examples/StochDynamicProgramming.jl_multistock.jl
@@ -10,7 +10,7 @@
 using SDDP, HiGHS, Test

 function test_multistock_example()
-    model = SDDP.LinearPolicyGraph(
+    model = SDDP.LinearPolicyGraph(;
         stages = 5,
         lower_bound = -5.0,
         optimizer = HiGHS.Optimizer,
2 changes: 1 addition & 1 deletion docs/src/examples/StochDynamicProgramming.jl_stock.jl
@@ -11,7 +11,7 @@ using SDDP, HiGHS, Test

 function stock_example()
     model = SDDP.PolicyGraph(
-        SDDP.LinearGraph(5),
+        SDDP.LinearGraph(5);
         lower_bound = -2,
         optimizer = HiGHS.Optimizer,
     ) do sp, stage
2 changes: 1 addition & 1 deletion docs/src/examples/StructDualDynProg.jl_prob5.2_2stages.jl
@@ -10,7 +10,7 @@
 using SDDP, HiGHS, Test

 function test_prob52_2stages()
-    model = SDDP.LinearPolicyGraph(
+    model = SDDP.LinearPolicyGraph(;
         stages = 2,
         lower_bound = 0.0,
         optimizer = HiGHS.Optimizer,
2 changes: 1 addition & 1 deletion docs/src/examples/StructDualDynProg.jl_prob5.2_3stages.jl
@@ -10,7 +10,7 @@
 using SDDP, HiGHS, Test

 function test_prob52_3stages()
-    model = SDDP.LinearPolicyGraph(
+    model = SDDP.LinearPolicyGraph(;
         stages = 3,
         lower_bound = 0.0,
         optimizer = HiGHS.Optimizer,
2 changes: 1 addition & 1 deletion docs/src/examples/agriculture_mccardle_farm.jl
@@ -56,7 +56,7 @@ function test_mccardle_farm_model()
     ])

     model = SDDP.PolicyGraph(
-        graph,
+        graph;
         lower_bound = 0.0,
         optimizer = HiGHS.Optimizer,
     ) do subproblem, index
2 changes: 1 addition & 1 deletion docs/src/examples/air_conditioning.jl
@@ -21,7 +21,7 @@
 using SDDP, HiGHS, Test

 function air_conditioning_model(duality_handler)
-    model = SDDP.LinearPolicyGraph(
+    model = SDDP.LinearPolicyGraph(;
         stages = 3,
         lower_bound = 0.0,
         optimizer = HiGHS.Optimizer,
2 changes: 1 addition & 1 deletion docs/src/examples/air_conditioning_forward.jl
@@ -10,7 +10,7 @@ import HiGHS
 import Test

 function create_air_conditioning_model(; convex::Bool)
-    return SDDP.LinearPolicyGraph(
+    return SDDP.LinearPolicyGraph(;
         stages = 3,
         lower_bound = 0.0,
         optimizer = HiGHS.Optimizer,
2 changes: 1 addition & 1 deletion docs/src/examples/all_blacks.jl
@@ -11,7 +11,7 @@ function all_blacks()
     ## Number of time periods, number of seats, R_ij = revenue from selling seat
     ## i at time j, offer_ij = whether an offer for seat i will come at time j
     (T, N, R, offer) = (3, 2, [3 3 6; 3 3 6], [1 1 0; 1 0 1])
-    model = SDDP.LinearPolicyGraph(
+    model = SDDP.LinearPolicyGraph(;
         stages = T,
         sense = :Max,
         upper_bound = 100.0,
2 changes: 1 addition & 1 deletion docs/src/examples/asset_management_simple.jl
@@ -21,7 +21,7 @@ function asset_management_simple()
                 [0.5 0.5; 0.5 0.5],
                 [0.5 0.5; 0.5 0.5],
             ],
-        ),
+        );
         lower_bound = -1_000.0,
         optimizer = HiGHS.Optimizer,
     ) do subproblem, index
8 changes: 4 additions & 4 deletions docs/src/examples/asset_management_stagewise.jl
@@ -18,7 +18,7 @@ function asset_management_stagewise(; cut_type)
     Phi = [-1, 5]
     Psi = [0.02, 0.0]

-    model = SDDP.MarkovianPolicyGraph(
+    model = SDDP.MarkovianPolicyGraph(;
         sense = :Max,
         transition_matrices = Array{Float64,2}[
             [1.0]',
@@ -63,14 +63,14 @@ function asset_management_stagewise(; cut_type)
             if node[1] != 3
                 SDDP.Expectation()
             else
-                SDDP.EAVaR(lambda = 0.5, beta = 0.5)
+                SDDP.EAVaR(; lambda = 0.5, beta = 0.5)
             end
         end,
     )
     @test SDDP.calculate_bound(model) ≈ 1.278 atol = 1e-3
     return
 end

-asset_management_stagewise(cut_type = SDDP.SINGLE_CUT)
+asset_management_stagewise(; cut_type = SDDP.SINGLE_CUT)

-asset_management_stagewise(cut_type = SDDP.MULTI_CUT)
+asset_management_stagewise(; cut_type = SDDP.MULTI_CUT)
2 changes: 1 addition & 1 deletion docs/src/examples/belief.jl
@@ -28,7 +28,7 @@ function inventory_management_problem()
     SDDP.add_ambiguity_set(graph, [:Ah, :Bh], 1e2)

     model = SDDP.PolicyGraph(
-        graph,
+        graph;
         lower_bound = 0.0,
         optimizer = HiGHS.Optimizer,
     ) do subproblem, node
4 changes: 2 additions & 2 deletions docs/src/examples/biobjective_hydro.jl
@@ -8,7 +8,7 @@
 using SDDP, HiGHS, Statistics, Test

 function biobjective_example()
-    model = SDDP.LinearPolicyGraph(
+    model = SDDP.LinearPolicyGraph(;
         stages = 3,
         lower_bound = 0.0,
         optimizer = HiGHS.Optimizer,
@@ -45,7 +45,7 @@ function biobjective_example()
         end
     end
     pareto_weights =
-        SDDP.train_biobjective(model, solution_limit = 10, iteration_limit = 10)
+        SDDP.train_biobjective(model; solution_limit = 10, iteration_limit = 10)
     solutions = [(k, v) for (k, v) in pareto_weights]
     sort!(solutions; by = x -> x[1])
     @test length(solutions) == 10
2 changes: 1 addition & 1 deletion docs/src/examples/booking_management.jl
@@ -31,7 +31,7 @@ function booking_management_model(num_days, num_rooms, num_requests)
         end
     end

-    return model = SDDP.LinearPolicyGraph(
+    return model = SDDP.LinearPolicyGraph(;
         stages = num_requests,
         upper_bound = max_revenue,
         sense = :Max,
2 changes: 1 addition & 1 deletion docs/src/examples/generation_expansion.jl
@@ -26,7 +26,7 @@ function generation_expansion(duality_handler)
     penalty = 5e5
     ## Discounting rate
     rho = 0.99
-    model = SDDP.LinearPolicyGraph(
+    model = SDDP.LinearPolicyGraph(;
         stages = 5,
         lower_bound = 0.0,
         optimizer = HiGHS.Optimizer,
42 changes: 22 additions & 20 deletions docs/src/examples/hydro_valley.jl
@@ -90,7 +90,7 @@ function hydro_valley_model(;
     N = length(valley_chain)

     ## Initialise SDDP Model
-    return m = SDDP.MarkovianPolicyGraph(
+    return m = SDDP.MarkovianPolicyGraph(;
         sense = sense,
         lower_bound = lower,
         upper_bound = upper,
@@ -215,47 +215,49 @@ function test_hydro_valley_model()
     Random.seed!(11111)

     ## deterministic
-    deterministic_model =
-        hydro_valley_model(hasmarkovprice = false, hasstagewiseinflows = false)
+    deterministic_model = hydro_valley_model(;
+        hasmarkovprice = false,
+        hasstagewiseinflows = false,
+    )
     SDDP.train(
-        deterministic_model,
+        deterministic_model;
         iteration_limit = 10,
         cut_deletion_minimum = 1,
         print_level = 0,
     )
     @test SDDP.calculate_bound(deterministic_model) ≈ 835.0 atol = 1e-3

     ## stagewise inflows
-    stagewise_model = hydro_valley_model(hasmarkovprice = false)
-    SDDP.train(stagewise_model, iteration_limit = 20, print_level = 0)
+    stagewise_model = hydro_valley_model(; hasmarkovprice = false)
+    SDDP.train(stagewise_model; iteration_limit = 20, print_level = 0)
     @test SDDP.calculate_bound(stagewise_model) ≈ 838.33 atol = 1e-2

     ## Markov prices
-    markov_model = hydro_valley_model(hasstagewiseinflows = false)
-    SDDP.train(markov_model, iteration_limit = 10, print_level = 0)
+    markov_model = hydro_valley_model(; hasstagewiseinflows = false)
+    SDDP.train(markov_model; iteration_limit = 10, print_level = 0)
     @test SDDP.calculate_bound(markov_model) ≈ 851.8 atol = 1e-2

     ## stagewise inflows and Markov prices
     markov_stagewise_model =
-        hydro_valley_model(hasstagewiseinflows = true, hasmarkovprice = true)
-    SDDP.train(markov_stagewise_model, iteration_limit = 10, print_level = 0)
+        hydro_valley_model(; hasstagewiseinflows = true, hasmarkovprice = true)
+    SDDP.train(markov_stagewise_model; iteration_limit = 10, print_level = 0)
     @test SDDP.calculate_bound(markov_stagewise_model) ≈ 855.0 atol = 1.0

     ## risk averse stagewise inflows and Markov prices
     riskaverse_model = hydro_valley_model()
     SDDP.train(
-        riskaverse_model,
-        risk_measure = SDDP.EAVaR(lambda = 0.5, beta = 0.66),
+        riskaverse_model;
+        risk_measure = SDDP.EAVaR(; lambda = 0.5, beta = 0.66),
         iteration_limit = 10,
         print_level = 0,
     )
     @test SDDP.calculate_bound(riskaverse_model) ≈ 828.157 atol = 1.0

     ## stagewise inflows and Markov prices
-    worst_case_model = hydro_valley_model(sense = :Min)
+    worst_case_model = hydro_valley_model(; sense = :Min)
     SDDP.train(
-        worst_case_model,
-        risk_measure = SDDP.EAVaR(lambda = 0.5, beta = 0.0),
+        worst_case_model;
+        risk_measure = SDDP.EAVaR(; lambda = 0.5, beta = 0.0),
         iteration_limit = 10,
         print_level = 0,
     )
@@ -264,26 +266,26 @@ function test_hydro_valley_model()
     ## stagewise inflows and Markov prices
     cutselection_model = hydro_valley_model()
     SDDP.train(
-        cutselection_model,
+        cutselection_model;
         iteration_limit = 10,
         print_level = 0,
         cut_deletion_minimum = 2,
     )
     @test SDDP.calculate_bound(cutselection_model) ≈ 855.0 atol = 1.0

     ## Distributionally robust Optimization
-    dro_model = hydro_valley_model(hasmarkovprice = false)
+    dro_model = hydro_valley_model(; hasmarkovprice = false)
     SDDP.train(
-        dro_model,
+        dro_model;
         risk_measure = SDDP.ModifiedChiSquared(sqrt(2 / 3) - 1e-6),
         iteration_limit = 10,
         print_level = 0,
     )
     @test SDDP.calculate_bound(dro_model) ≈ 835.0 atol = 1.0

-    dro_model = hydro_valley_model(hasmarkovprice = false)
+    dro_model = hydro_valley_model(; hasmarkovprice = false)
     SDDP.train(
-        dro_model,
+        dro_model;
         risk_measure = SDDP.ModifiedChiSquared(1 / 6),
         iteration_limit = 20,
         print_level = 0,
6 changes: 3 additions & 3 deletions docs/src/examples/infinite_horizon_hydro_thermal.jl
@@ -53,7 +53,7 @@ function infinite_hydro_thermal(; cut_type)
         model;
         cut_type = cut_type,
         log_frequency = 100,
-        sampling_scheme = SDDP.InSampleMonteCarlo(terminate_on_cycle = true),
+        sampling_scheme = SDDP.InSampleMonteCarlo(; terminate_on_cycle = true),
         cycle_discretization_delta = 0.1,
     )
     @test SDDP.calculate_bound(model) ≈ 119.167 atol = 0.1
@@ -68,5 +68,5 @@ function infinite_hydro_thermal(; cut_type)
     return
 end

-infinite_hydro_thermal(cut_type = SDDP.SINGLE_CUT)
-infinite_hydro_thermal(cut_type = SDDP.MULTI_CUT)
+infinite_hydro_thermal(; cut_type = SDDP.SINGLE_CUT)
+infinite_hydro_thermal(; cut_type = SDDP.MULTI_CUT)
2 changes: 1 addition & 1 deletion docs/src/examples/infinite_horizon_trivial.jl
@@ -14,7 +14,7 @@ function infinite_trivial()
         [(:root_node => :week, 1.0), (:week => :week, 0.9)],
     )
     model = SDDP.PolicyGraph(
-        graph,
+        graph;
         lower_bound = 0.0,
         optimizer = HiGHS.Optimizer,
     ) do subproblem, node
2 changes: 1 addition & 1 deletion docs/src/examples/no_strong_duality.jl
@@ -16,7 +16,7 @@ function no_strong_duality()
             :root,
             [:node],
             [(:root => :node, 1.0), (:node => :node, 0.5)],
-        ),
+        );
         optimizer = HiGHS.Optimizer,
         lower_bound = 0.0,
     ) do sp, t
[Diff truncated; the remaining changed files are not shown in this view.]
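
To reproduce the mechanical part of this commit locally, one can run JuliaFormatter over the repository root, where it picks up the options in `.JuliaFormatter.toml`. A minimal sketch, assuming JuliaFormatter.jl is installed in the active environment:

    using JuliaFormatter

    # Format every .jl file under the current directory, using the options
    # discovered in .JuliaFormatter.toml; returns true if nothing changed.
    format(".")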
