pandakota.input.methods module

Methods

DAKOTA Methods

class pandakota.input.methods.ColinyCobylaOptimize(max_iterations: int, max_function_evaluations: int, convergence_tolerance: float, initial_delta: float | None = None, variable_tolerance: float | None = None, solution_target: float | None = None)

Bases: Optimize

Constrained Optimization BY Linear Approximations

Attributes:
convergence_tolerance
max_function_evaluations
max_iterations
refinements

Methods:
add_refinement
to_string

optimize_type = 'coliny_cobyla'
to_string() str
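
A minimal construction sketch; the numeric values below are illustrative, not recommended settings, and to_string() is assumed here to render the text of the corresponding Dakota method block:

    from pandakota.input import methods

    # COBYLA: derivative-free local optimization using linear approximations.
    cobyla = methods.ColinyCobylaOptimize(
        max_iterations=200,
        max_function_evaluations=1000,
        convergence_tolerance=1e-4,
        initial_delta=0.5,      # optional; illustrative starting step size
    )
    print(cobyla.to_string())   # Dakota 'method' block text (assumed)
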
class pandakota.input.methods.JegaOptimize(max_iterations: int, max_function_evaluations: int, convergence_tolerance: float, population_size: int, seed: int, replacement_type: str | None = None, convergence_type: str | None = None, initialization_type: str | None = None, crossover_type: str | None = None, crossover_rate: float | None = None, mutation_type: str | None = None, mutation_rate: float | None = None, num_parents: int | None = None, num_offspring: int | None = None, flat_file_path: str | None = None)

Bases: Optimize

Abstract Base Class for JEGA methods

Attributes:
convergence_tolerance
max_function_evaluations
max_iterations
population_size
refinements
seed

Methods:
add_refinement
check_convergence_type
check_crossover_type
check_initialization_type
check_mutation_type
check_replacement_type
to_string

classmethod check_convergence_type(ct: str)
classmethod check_crossover_type(cot: str)
classmethod check_initialization_type(it: str, flat_file_path: str)
classmethod check_mutation_type(mt: str)
classmethod check_replacement_type(rt: str)
convergence_types = {}
crossover_types = {'multi_point_binary', 'multi_point_parameterized_binary', 'multi_point_real', 'shuffle_random'}
initialization_types = {'flat_file', 'simple_random', 'unique_random'}
mutation_types = {'bit_random', 'offset_cauchy', 'offset_normal', 'offset_uniform', 'replace_uniform'}
optimize_type = "jega  # TODO: Change to 'soga' or 'moga'."
property population_size
replacement_types = {'elitist', 'roulette_wheel', 'unique_roulette_wheel'}
property seed
to_string() str
type_rate_pairs = {'crossover_type': 'crossover_rate', 'mutation_type': 'mutation_rate'}
class pandakota.input.methods.LatinHypercubeSampling(nsamples: int, seed: int)

Bases: Sampling

Latin hypercube sampling (LHS)

Attributes:
nsamples
refinements

Methods:
add_refinement
to_string

add_refinement(refinement_samples: int | None = None)
sample_type = 'lhs'
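
A short sketch constructing an LHS study and registering a refinement stage (sample counts are illustrative):

    from pandakota.input import methods

    lhs = methods.LatinHypercubeSampling(nsamples=100, seed=42)
    lhs.add_refinement(refinement_samples=50)  # optional follow-on refinement
    print(lhs.to_string())
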
class pandakota.input.methods.Method

Bases: ABC

Abstract Base Class for all methods

Attributes:
refinements

Methods:
add_refinement
to_string

add_refinement(refinement_samples: int)
property refinements
requires_gradients = False
requires_hessians = False
abstract to_string() str
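
Method is abstract, so only subclasses that implement to_string() can be instantiated. A hypothetical subclass sketch (the class name and the emitted block text are assumptions for illustration, not part of pandakota):

    from pandakota.input.methods import Method

    class CenteredParameterStudy(Method):  # hypothetical subclass
        def to_string(self) -> str:
            # Return the text of a Dakota 'method' block (format assumed).
            return "method\n\tcentered_parameter_study\n"
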
class pandakota.input.methods.MogaOptimize(*args, niching_type=None, postprocessor_type=None, **kwargs)

Bases: JegaOptimize

Attributes:
convergence_tolerance
max_function_evaluations
max_iterations
population_size
refinements
seed

Methods:
add_refinement
check_convergence_type
check_crossover_type
check_initialization_type
check_mutation_type
check_replacement_type
to_string

convergence_types = {'metric_tracker', 'num_generations', 'percent_change'}
optimize_type = 'moga'
replacement_types = {'below_limit', 'elitist', 'roulette_wheel', 'unique_roulette_wheel'}
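
A construction sketch for the multi-objective GA; the type strings come from the documented convergence_types, replacement_types, and crossover_types sets, while the numeric values are illustrative:

    from pandakota.input import methods

    moga = methods.MogaOptimize(
        max_iterations=100,
        max_function_evaluations=5000,
        convergence_tolerance=1e-4,
        population_size=48,
        seed=12345,
        convergence_type="metric_tracker",  # from MogaOptimize.convergence_types
        replacement_type="below_limit",     # from MogaOptimize.replacement_types
        crossover_type="shuffle_random",    # from JegaOptimize.crossover_types
        crossover_rate=0.8,
    )
    print(moga.to_string())
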
class pandakota.input.methods.MonteCarloSampling(nsamples: int, seed: int)

Bases: Sampling

Monte Carlo (random) sampling

Attributes:
nsamples
refinements

Methods:
add_refinement
to_string

sample_type = 'random'
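
Construction mirrors LatinHypercubeSampling; only the emitted sample_type differs ('random' rather than 'lhs'):

    from pandakota.input import methods

    mc = methods.MonteCarloSampling(nsamples=1000, seed=7)
    print(mc.to_string())
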
class pandakota.input.methods.NcsuDirectOptimize(max_iterations: int, max_function_evaluations: int, convergence_tolerance: float, solution_target: float | None = None, min_boxsize_limit: float | None = None, volume_boxsize_limit: float | None = None)

Bases: Optimize

Attributes:
convergence_tolerance
max_function_evaluations
max_iterations
min_boxsize_limit
refinements
solution_target
volume_boxsize_limit

Methods:
add_refinement
to_string

property min_boxsize_limit
optimize_type = 'ncsu_direct'
property solution_target
to_string() str
property volume_boxsize_limit
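
A sketch of the NCSU DIRECT global optimizer with its optional box-size stopping limits (all values illustrative):

    from pandakota.input import methods

    direct = methods.NcsuDirectOptimize(
        max_iterations=500,
        max_function_evaluations=10000,
        convergence_tolerance=1e-4,
        min_boxsize_limit=1e-6,     # optional stopping limit (illustrative)
        volume_boxsize_limit=1e-8,  # optional stopping limit (illustrative)
    )
    print(direct.to_string())
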
class pandakota.input.methods.NlpqlSqpOptimize(max_iterations: int, max_function_evaluations: int, convergence_tolerance: float)

Bases: Optimize

NLPQL Sequential Quadratic Programming (SQP)

Attributes:
convergence_tolerance
max_function_evaluations
max_iterations
refinements

Methods:
add_refinement
to_string

optimize_type = 'nlpql_sqp'
class pandakota.input.methods.Optimize(max_iterations: int, max_function_evaluations: int, convergence_tolerance: float)

Bases: Method

Abstract Base Class for all optimization methods

Attributes:
convergence_tolerance
max_function_evaluations
max_iterations
optimize_type
refinements

Methods:
add_refinement
to_string

property convergence_tolerance
function_key = 'objective_functions'
property max_function_evaluations
property max_iterations
optimize_type = None
to_string() str
class pandakota.input.methods.OptppCgOptimize(max_iterations: int, max_function_evaluations: int, convergence_tolerance: float, max_step: float | None = None, gradient_tolerance: float | None = None, speculative: bool = False)

Bases: OptppOptimize

Conjugate gradient optimization

Attributes:
convergence_tolerance
max_function_evaluations
max_iterations
refinements

Methods:
add_refinement
check_merit_function
to_string

optimize_type = 'optpp_cg'
requires_gradients = True
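
Because requires_gradients is True, this method is meant to be paired with a gradient-capable response specification. A construction sketch with the optional Opt++ controls (values illustrative):

    from pandakota.input import methods

    cg = methods.OptppCgOptimize(
        max_iterations=100,
        max_function_evaluations=2000,
        convergence_tolerance=1e-6,
        gradient_tolerance=1e-5,  # optional
        max_step=10.0,            # optional
        speculative=True,         # optional: speculative gradient evaluations
    )
    assert cg.requires_gradients  # documented class attribute
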
class pandakota.input.methods.OptppFdNewtonOptimize(*args, search_method: str | None = None, centering_parameter: float | None = None, steplength_to_boundary: float | None = None, **kwargs)

Bases: OptppNewtonOptimize

Finite Difference Newton optimization method

Attributes:
convergence_tolerance
max_function_evaluations
max_iterations
refinements

Methods:
add_refinement
check_merit_function
check_search_method
to_string

optimize_type = 'optpp_fd_newton'
requires_gradients = True
requires_hessians = False
class pandakota.input.methods.OptppGNewtonOptimize(*args, search_method: str | None = None, centering_parameter: float | None = None, steplength_to_boundary: float | None = None, **kwargs)

Bases: OptppNewtonOptimize

Newton optimization method based on least-squares calibration

Attributes:
convergence_tolerance
max_function_evaluations
max_iterations
refinements

Methods:
add_refinement
check_merit_function
check_search_method
to_string

optimize_type = 'optpp_g_newton'
requires_gradients = False
requires_hessians = False
class pandakota.input.methods.OptppNewtonOptimize(*args, search_method: str | None = None, centering_parameter: float | None = None, steplength_to_boundary: float | None = None, **kwargs)

Bases: OptppOptimize

The optpp_newton method, and base class for other Opt++ Newton methods

Attributes:
convergence_tolerance
max_function_evaluations
max_iterations
refinements

Methods:
add_refinement
check_merit_function
check_search_method
to_string

classmethod check_search_method(sm)
optimize_type = 'optpp_newton'
requires_gradients = True
requires_hessians = True
search_methods = {'gradient_based_line_search', 'tr_pds', 'trust_region', 'value_based_line_search'}
to_string() str
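
The search_method argument must be one of the documented search_methods; check_search_method can validate a value up front (its exact failure behavior, e.g. raising on an unknown value, is an assumption here):

    from pandakota.input import methods

    methods.OptppNewtonOptimize.check_search_method("trust_region")  # assumed to reject invalid names

    newton = methods.OptppNewtonOptimize(
        max_iterations=50,
        max_function_evaluations=500,
        convergence_tolerance=1e-8,
        search_method="trust_region",  # one of OptppNewtonOptimize.search_methods
        steplength_to_boundary=0.9,    # optional (illustrative)
    )
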
class pandakota.input.methods.OptppOptimize(max_iterations: int, max_function_evaluations: int, convergence_tolerance: float, max_step: float | None = None, gradient_tolerance: float | None = None, speculative: bool = False, merit_function: str | None = None)

Bases: Optimize

Abstract Base Class for Opt++ family of local optimizers

Attributes:
convergence_tolerance
max_function_evaluations
max_iterations
refinements

Methods:
add_refinement
check_merit_function
to_string

classmethod check_merit_function(mf: str)
merit_functions = {'argaez_tapia', 'el_bakry', 'van_shanno'}
optimize_type = 'optpp  # TODO: change to one of the optpp_* methods'
to_string() str
class pandakota.input.methods.OptppPdsOptimize(max_iterations: int, max_function_evaluations: int, convergence_tolerance: float, search_scheme_size=None)

Bases: OptppOptimize

Simplex-based derivative-free optimization

Attributes:
convergence_tolerance
max_function_evaluations
max_iterations
refinements

Methods:
add_refinement
check_merit_function
to_string

optimize_type = 'optpp_pds'
to_string() str
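
A sketch of the derivative-free PDS method; search_scheme_size sets the size of the search pattern (value illustrative):

    from pandakota.input import methods

    pds = methods.OptppPdsOptimize(
        max_iterations=200,
        max_function_evaluations=4000,
        convergence_tolerance=1e-4,
        search_scheme_size=32,  # optional (illustrative)
    )
    print(pds.to_string())
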
class pandakota.input.methods.OptppQNewtonOptimize(*args, search_method: str | None = None, centering_parameter: float | None = None, steplength_to_boundary: float | None = None, **kwargs)

Bases: OptppNewtonOptimize

Quasi-Newton optimization method

Attributes:
convergence_tolerance
max_function_evaluations
max_iterations
refinements

Methods:
add_refinement
check_merit_function
check_search_method
to_string

optimize_type = 'optpp_q_newton'
requires_gradients = True
requires_hessians = False
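
Quasi-Newton needs gradients but builds its own Hessian approximation, matching the documented requires_gradients and requires_hessians flags:

    from pandakota.input import methods

    qn = methods.OptppQNewtonOptimize(
        max_iterations=100,
        max_function_evaluations=2000,
        convergence_tolerance=1e-6,
        search_method="value_based_line_search",  # from the inherited search_methods set
    )
    assert qn.requires_gradients and not qn.requires_hessians
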
class pandakota.input.methods.Sampling(nsamples: int, seed: int)

Bases: Method

Abstract Base Class for all sampling methods

Attributes:
nsamples
refinements
sample_type

Methods:
add_refinement
to_string

function_key = 'response_functions'
property nsamples
sample_type = None
to_string() str
class pandakota.input.methods.SogaOptimize(max_iterations: int, max_function_evaluations: int, convergence_tolerance: float, population_size: int, seed: int, replacement_type: str | None = None, convergence_type: str | None = None, initialization_type: str | None = None, crossover_type: str | None = None, crossover_rate: float | None = None, mutation_type: str | None = None, mutation_rate: float | None = None, num_parents: int | None = None, num_offspring: int | None = None, flat_file_path: str | None = None)

Bases: JegaOptimize

Attributes:
convergence_tolerance
max_function_evaluations
max_iterations
population_size
refinements
seed

Methods:
add_refinement
check_convergence_type
check_crossover_type
check_initialization_type
check_mutation_type
check_replacement_type
to_string

convergence_types = {'average_fitness_tracker', 'best_fitness_tracker'}
optimize_type = 'soga'
replacement_types = {'elitist', 'favor_feasible', 'roulette_wheel', 'unique_roulette_wheel'}
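
A single-objective GA sketch; the type strings are drawn from the documented SogaOptimize and JegaOptimize sets, and the numeric values are illustrative:

    from pandakota.input import methods

    soga = methods.SogaOptimize(
        max_iterations=100,
        max_function_evaluations=5000,
        convergence_tolerance=1e-4,
        population_size=64,
        seed=2024,
        convergence_type="best_fitness_tracker",  # from SogaOptimize.convergence_types
        replacement_type="favor_feasible",        # from SogaOptimize.replacement_types
        mutation_type="offset_normal",            # from JegaOptimize.mutation_types
        mutation_rate=0.1,
    )
    print(soga.to_string())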