Standardize to nvars #20

Open · wants to merge 5 commits into base: master
6 changes: 3 additions & 3 deletions docs/source/adaptive_leja_sequences.rst
@@ -50,7 +50,7 @@ Our goal is to demonstrate how to use a polynomial chaos expansion (PCE) to appr

c = np.array([10,0.01])
model = GenzFunction(
"oscillatory",variable.num_vars(),c=c,w=np.zeros_like(c))
"oscillatory",variable.nvars,c=c,w=np.zeros_like(c))
# model.set_coefficients(4,'exponential-decay')

Here we have intentionally set the coefficients :math:`c` of the Genz function to be highly anisotropic, to emphasize the properties of the adaptive algorithm.
@@ -105,9 +105,9 @@ Now we set up the adaptive algorithm.
error_tol=1e-10

candidate_samples=-np.cos(
-        np.random.uniform(0,np.pi,(var_trans.num_vars(),int(1e4))))
+        np.random.uniform(0,np.pi,(var_trans.nvars,int(1e4))))
pce = AdaptiveLejaPCE(
-        var_trans.num_vars(),candidate_samples,factorization_type='fast')
+        var_trans.nvars,candidate_samples,factorization_type='fast')

max_level=np.inf
max_level_1d=[max_level]*(pce.num_vars)
8 changes: 4 additions & 4 deletions docs/source/polynomial_chaos_interpolation.rst
@@ -41,10 +41,10 @@ Our goal is to demonstrate how to use a polynomial chaos expansion (PCE) to appr
univariate_variables = [uniform(),beta(3,3)]
variable = IndependentMultivariateRandomVariable(univariate_variables)

-    c = np.random.uniform(0.,1.,variable.num_vars())
+    c = np.random.uniform(0.,1.,variable.nvars)
c*=4/c.sum()
w = np.zeros_like(c); w[0] = np.random.uniform(0.,1.,1)
-    model = GenzFunction( "oscillatory",variable.num_vars(),c=c,w=w )
+    model = GenzFunction( "oscillatory",variable.nvars,c=c,w=w )

PCE represents the model output :math:`f(\V{\rv})` as an expansion in orthonormal polynomials.
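The expansion referred to here has the standard truncated form (a sketch of the usual notation, not the file's verbatim equation):

.. math:: f(\V{\rv}) \approx \sum_{\lambda\in\Lambda} c_\lambda \phi_\lambda(\V{\rv})

where the :math:`\phi_\lambda` are polynomials orthonormal with respect to the joint density of :math:`\V{\rv}`, :math:`\Lambda` is a multi-index set, and the :math:`c_\lambda` are the coefficients computed below.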

@@ -129,7 +129,7 @@ To set the PCE truncation to a third degree total-degree index set use
close-figs

degree=3
-    indices = compute_hyperbolic_indices(poly.num_vars(),degree,1.0)
+    indices = compute_hyperbolic_indices(poly.nvars,degree,1.0)
poly.set_indices(indices)

Now that we have defined the PCE, we must compute its coefficients. Pyapprox supports a number of methods to compute the polynomial coefficients; here we will use interpolation. Specifically, we evaluate the function at a set of samples :math:`\mathcal{Z}=[\V{\rv}^{(1)},\ldots,\V{\rv}^{(M)}]` to obtain a set of function values :math:`\V{F}=[\V{f}^{(1)},\ldots,\V{f}^{(M)}]^T`. The function may be vector valued, and thus each :math:`\V{f}^{(i)}\in\mathbb{R}^Q` is a vector and :math:`\V{F}\in\mathbb{R}^{M\times Q}` is a matrix.
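A minimal sketch of that interpolation step, assuming the `basis_matrix` and `set_coefficients` methods named elsewhere in these docs (the exact calls are illustrative, not the tutorial's verbatim code):

    # Evaluate the PCE basis at the training samples and solve the
    # (possibly overdetermined) linear system for the coefficients.
    basis_mat = poly.basis_matrix(train_samples)    # M x N basis evaluations
    coef = np.linalg.lstsq(basis_mat, train_values, rcond=None)[0]
    poly.set_coefficients(coef)                     # poly now interpolates the model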
@@ -149,7 +149,7 @@ Sampling from this measure is asymptotically optimal (as degree increases) for a
close-figs

ntrain_samples = int(poly.indices.shape[1]*1.1)
-    train_samples = -np.cos(np.random.uniform(0,2*np.pi,(poly.num_vars(),ntrain_samples)))
+    train_samples = -np.cos(np.random.uniform(0,2*np.pi,(poly.nvars,ntrain_samples)))
train_samples = var_trans.map_from_canonical_space(train_samples)
train_values = model(train_samples)
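The map :math:`\rv=-\cos(u)` with :math:`u\sim\mathcal{U}(0,2\pi)` used above draws canonical samples from the Chebyshev (arcsine) measure

.. math:: p(\rv) = \frac{1}{\pi\sqrt{1-\rv^2}}, \qquad \rv\in(-1,1)

which is the measure whose asymptotic optimality the preceding sentence refers to; `var_trans` then maps these canonical samples back to the ranges of the original variables.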

8 changes: 4 additions & 4 deletions examples/plot_design_under_uncertainty.py
@@ -35,10 +35,10 @@
nsamples = 10
samples = pya.generate_independent_random_samples(benchmark.variable,nsamples)
fun = ActiveSetVariableModel(
-    benchmark.fun,benchmark.variable.num_vars()+benchmark.design_variable.num_vars(),
+    benchmark.fun,benchmark.variable.nvars+benchmark.design_variable.nvars,
samples,benchmark.design_var_indices)
jac = ActiveSetVariableModel(
-    benchmark.jac,benchmark.variable.num_vars()+benchmark.design_variable.num_vars(),
+    benchmark.jac,benchmark.variable.nvars+benchmark.design_variable.nvars,
samples,benchmark.design_var_indices)

nsamples = 10000
@@ -49,9 +49,9 @@
seed=1
generate_sample_data = partial(
generate_monte_carlo_quadrature_data,generate_random_samples,
-    benchmark.variable.num_vars(),benchmark.design_var_indices,seed=seed)
+    benchmark.variable.nvars,benchmark.design_var_indices,seed=seed)

-num_vars = benchmark.variable.num_vars()+benchmark.design_variable.num_vars()
+num_vars = benchmark.variable.nvars+benchmark.design_variable.nvars
objective = StatisticalConstraint(
benchmark.fun,benchmark.jac,expectation_fun,expectation_jac,num_vars,
benchmark.design_var_indices,generate_sample_data,isobjective=True)
21 changes: 12 additions & 9 deletions pyapprox/adaptive_sparse_grid.py
@@ -577,19 +577,19 @@ def convert_sparse_grid_to_polynomial_chaos_expansion(sparse_grid, pce_opts,
pce = PolynomialChaosExpansion()
pce.configure(pce_opts)
if sparse_grid.config_variables_idx is not None:
-        assert pce.num_vars() == sparse_grid.config_variables_idx
+        assert pce.nvars == sparse_grid.config_variables_idx
else:
-        assert pce.num_vars() == sparse_grid.num_vars
+        assert pce.nvars == sparse_grid.num_vars

def get_recursion_coefficients(N, dd):
-        pce.update_recursion_coefficients([N]*pce.num_vars())
+        pce.update_recursion_coefficients([N]*pce.nvars)
return pce.recursion_coeffs[pce.basis_type_index_map[dd]].copy()

coeffs_1d = [
convert_univariate_lagrange_basis_to_orthonormal_polynomials(
sparse_grid.samples_1d[dd],
partial(get_recursion_coefficients, dd=dd))
-        for dd in range(pce.num_vars())]
+        for dd in range(pce.nvars)]

indices_list = []
coeffs_list = []
@@ -1070,10 +1070,10 @@ def set_config_variable_index(self, idx, config_var_trans=None):
self.config_var_trans = config_var_trans
self.num_config_vars = self.num_vars-self.config_variables_idx
if self.variable_transformation is not None:
-            assert (self.variable_transformation.num_vars() ==
+            assert (self.variable_transformation.nvars ==
self.config_variables_idx)
if self.config_var_trans is not None:
-            assert self.num_config_vars == self.config_var_trans.num_vars()
+            assert self.num_config_vars == self.config_var_trans.nvars

def eval_cost_function(self, samples):
config_samples = self.map_config_samples_from_canonical_space(
@@ -1255,9 +1255,9 @@ def get_sparse_grid_univariate_leja_quadrature_rules(
unique_max_level_1d = \
get_sparse_grid_univariate_leja_quadrature_rules_economical(
var_trans, growth_rules=None)
-    quad_rules = [None for ii in var_trans.num_vars()]
-    growth_rules = [None for ii in var_trans.num_vars()]
-    max_level_1d = [None for ii in var_trans.num_vars()]
+    quad_rules = [None for ii in range(var_trans.nvars)]
+    growth_rules = [None for ii in range(var_trans.nvars)]
+    max_level_1d = [None for ii in range(var_trans.nvars)]
for quad_rule, growth_rule, indices, max_level in zip(
unique_quad_rules, unique_growth_rules, unique_quadrule_indices,
unique_max_level_1d):
@@ -1632,6 +1632,9 @@ def num_vars(self):
-------
The number of configure variables
"""
+        import warnings
+        warnings.warn("Use of `num_vars()` will be deprecated. Access property `.nvars` instead",
+                      PendingDeprecationWarning)
return self.nvars
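The same shim recurs throughout this PR: the old method is kept but warns and forwards to a new read-only property. A self-contained sketch of the pattern (the class and attribute names here are hypothetical):

    import warnings

    class Example:
        def __init__(self, nvars):
            self._nvars = nvars

        @property
        def nvars(self):
            # New spelling: a read-only attribute-style accessor.
            return self._nvars

        def num_vars(self):
            # Old spelling: warn, then delegate to the property.
            warnings.warn(
                "Use of `num_vars()` will be deprecated. "
                "Access property `.nvars` instead",
                PendingDeprecationWarning)
            return self.nvars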


18 changes: 9 additions & 9 deletions pyapprox/approximate.py
@@ -175,9 +175,9 @@ def adaptive_approximate_sparse_grid(
The sparse grid approximation
"""
var_trans = AffineRandomVariableTransformation(variables)
-    nvars = var_trans.num_vars()
+    nvars = var_trans.nvars
if config_var_trans is not None:
-        nvars += config_var_trans.num_vars()
+        nvars += config_var_trans.nvars
sparse_grid = CombinationSparseGrid(nvars)

if max_level_1d is None:
@@ -255,7 +255,7 @@ def __initialize_leja_pce(
else:
break

-    nvars = variables.num_vars()
+    nvars = variables.nvars
if generate_candidate_samples is None:
# Todo implement default for non-bounded variables that uses induced
# sampling
@@ -290,7 +290,7 @@ def __setup_adaptive_pce(pce, verbose, fun, var_trans, growth_rules,
refinement_indicator, admissibility_function, growth_rules,
unique_quadrule_indices=unique_quadrule_indices)

-    nvars = var_trans.num_vars()
+    nvars = var_trans.nvars
if max_level_1d is None:
max_level_1d = [np.inf]*nvars
elif np.isscalar(max_level_1d):
@@ -408,7 +408,7 @@ def adaptive_approximate_polynomial_chaos_induced(
var_trans = AffineRandomVariableTransformation(variables)

pce = AdaptiveInducedPCE(
-        var_trans.num_vars(), induced_sampling=induced_sampling,
+        var_trans.nvars, induced_sampling=induced_sampling,
cond_tol=cond_tol, fit_opts=fit_opts)

__setup_adaptive_pce(pce, verbose, fun, var_trans, growth_rules,
@@ -657,7 +657,7 @@ def adaptive_approximate_gaussian_process(
"""
assert max_nsamples <= ncandidate_samples

-    nvars = variables.num_vars()
+    nvars = variables.nvars

if normalize_inputs:
var_trans = AffineRandomVariableTransformation(variables)
@@ -1231,7 +1231,7 @@ def _cross_validate_pce_degree(
rng_state = np.random.get_state()
for degree in range(min_degree, max_degree+1):
indices = compute_hyperbolic_indices(
-            pce.num_vars(), degree, hcross_strength)
+            pce.nvars, degree, hcross_strength)
pce.set_indices(indices)
if ((pce.num_terms() > 100000) and
(100000-prev_num_terms < pce.num_terms()-100000)):
@@ -1258,7 +1258,7 @@
prev_num_terms = pce.num_terms()

pce.set_indices(compute_hyperbolic_indices(
-        pce.num_vars(), best_degree, hcross_strength))
+        pce.nvars, best_degree, hcross_strength))
pce.set_coefficients(best_coef)
if verbose > 0:
print('best degree:', best_degree)
@@ -1430,7 +1430,7 @@ def _expanding_basis_pce(pce, train_samples, train_vals, hcross_strength=1,
max_iters=20,
max_num_step_increases=1):
assert train_vals.shape[1] == 1
-    num_vars = pce.num_vars()
+    num_vars = pce.nvars

if max_num_init_terms is None:
max_num_init_terms = train_vals.shape[0]
6 changes: 3 additions & 3 deletions pyapprox/arbitrary_polynomial_chaos.py
@@ -76,7 +76,7 @@ def unrotated_canonical_basis_matrix(self, canonical_samples):

def unrotated_basis_matrix(self, samples):
assert samples.ndim == 2
-        assert samples.shape[0] == self.num_vars()
+        assert samples.shape[0] == self.nvars
canonical_samples = self.var_trans.map_to_canonical_space(
samples)
matrix = self.unrotated_canonical_basis_matrix(canonical_samples)
@@ -98,7 +98,7 @@ def canonical_basis_matrix(self, canonical_samples, opts=dict()):

def basis_matrix(self, samples, opts=dict()):
assert samples.ndim == 2
-        assert samples.shape[0] == self.num_vars()
+        assert samples.shape[0] == self.nvars
canonical_samples = self.var_trans.map_to_canonical_space(
samples)
return self.canonical_basis_matrix(canonical_samples, opts)
@@ -349,7 +349,7 @@ def compute_grammian_matrix_using_combination_sparse_grid(
basis_matrix_function, dummy_indices, var_trans, max_num_samples,
error_tol=0,
density_function=None, quad_rule_opts=None):
-    num_vars = var_trans.num_vars()
+    num_vars = var_trans.nvars
sparse_grid = CombinationSparseGrid(num_vars)
admissibility_function = partial(
max_level_admissibility_function, np.inf, [np.inf]*num_vars,
2 changes: 1 addition & 1 deletion pyapprox/barycentric_interpolation.py
@@ -184,7 +184,7 @@ def multivariate_hierarchical_barycentric_lagrange_interpolation(

result = \
multivariate_hierarchical_barycentric_lagrange_interpolation_pyx(
-                x, fn_vals, active_dims,
+                x, fn_vals, active_dims.astype(np.int64),
active_abscissa_indices_1d.astype(np.int_),
num_abscissa_1d.astype(np.int_),
num_active_abscissa_1d.astype(np.int_),
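The added .astype(np.int64) pins down a fixed-width dtype for the compiled kernel: NumPy's default integer is the platform C long, so without the cast the array would be 32-bit on Windows. A small illustration (the Cython memoryview signature implied here is an assumption):

    import numpy as np

    active_dims = np.arange(3)            # int32 on Windows, int64 on Linux/macOS
    fixed = active_dims.astype(np.int64)  # always 64-bit, matching what an
                                          # np.int64_t[:] argument would require
    assert fixed.dtype == np.int64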
13 changes: 10 additions & 3 deletions pyapprox/bayesian_inference/laplace.py
@@ -43,10 +43,10 @@ def apply(self, vectors, transpose=None):
return z

def num_rows(self):
-        return self.prior_covariance_sqrt_operator.num_vars()
+        return self.prior_covariance_sqrt_operator.nvars

def num_cols(self):
-        return self.prior_covariance_sqrt_operator.num_vars()
+        return self.prior_covariance_sqrt_operator.nvars

class LaplaceSqrtMatVecOperator(object):
r"""
@@ -88,7 +88,14 @@ def __init__(self, prior_covariance_sqrt_operator, e_r=None, V_r=None,
self.set_eigenvalues(e_r)

def num_vars(self):
-        return self.prior_covariance_sqrt_operator.num_vars()
+        import warnings
+        warnings.warn("Use of `num_vars()` will be deprecated. Access property `.nvars` instead",
+                      PendingDeprecationWarning)
+        return self.prior_covariance_sqrt_operator.nvars
+
+    @property
+    def nvars(self):
+        return self.prior_covariance_sqrt_operator.nvars

def set_eigenvalues(self,e_r):
self.diagonal = np.sqrt(1./(e_r+1.))-1
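Note that PendingDeprecationWarning is silenced by default in Python, so callers of the old spelling will not see these messages unless they opt in. A usage sketch, with `op` standing for any object given this shim:

    import warnings

    with warnings.catch_warnings():
        warnings.simplefilter("always", PendingDeprecationWarning)
        n = op.num_vars()   # still works, but now emits the warning
    n = op.nvars            # preferred access going forward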
4 changes: 2 additions & 2 deletions pyapprox/bayesian_inference/tests/test_laplace.py
@@ -216,7 +216,7 @@ def posterior_covariance_helper(prior, rank, comparison_tol,

# Extract prior information required for computing exact posterior
# mean and covariance
-    num_vars = prior.num_vars()
+    num_vars = prior.nvars
prior_mean = np.zeros((num_vars),float)
L = L_op(np.eye(num_vars),False)
L_T = L_op(np.eye(num_vars),True)
@@ -715,7 +715,7 @@ def help_generate_and_save_laplace_posterior(
# map
misfit_model.map_point = lambda : exact_laplace_mean

-    num_singular_values = prior.num_vars()
+    num_singular_values = prior.nvars
try:
L_post_op = generate_and_save_laplace_posterior(
prior,misfit_model,num_singular_values,
(Two changed binary files are not shown.)
12 changes: 6 additions & 6 deletions pyapprox/benchmarks/test_benchmarks.py
@@ -46,12 +46,12 @@ def test_cantilever_beam_gradients(self):
from pyapprox.models.wrappers import ActiveSetVariableModel
fun = ActiveSetVariableModel(
benchmark.fun,
-        benchmark.variable.num_vars()+benchmark.design_variable.num_vars(),
+        benchmark.variable.nvars+benchmark.design_variable.nvars,
benchmark.variable.get_statistics('mean'),
benchmark.design_var_indices)
jac = ActiveSetVariableModel(
benchmark.jac,
-        benchmark.variable.num_vars()+benchmark.design_variable.num_vars(),
+        benchmark.variable.nvars+benchmark.design_variable.nvars,
benchmark.variable.get_statistics('mean'),
benchmark.design_var_indices)
init_guess = 2*np.ones((2, 1))
@@ -61,12 +61,12 @@

constraint_fun = ActiveSetVariableModel(
benchmark.constraint_fun,
-        benchmark.variable.num_vars()+benchmark.design_variable.num_vars(),
+        benchmark.variable.nvars+benchmark.design_variable.nvars,
benchmark.variable.get_statistics('mean'),
benchmark.design_var_indices)
constraint_jac = ActiveSetVariableModel(
benchmark.constraint_jac,
-        benchmark.variable.num_vars()+benchmark.design_variable.num_vars(),
+        benchmark.variable.nvars+benchmark.design_variable.nvars,
benchmark.variable.get_statistics('mean'),
benchmark.design_var_indices)
init_guess = 2*np.ones((2, 1))
@@ -79,11 +79,11 @@
benchmark.variable, nsamples)
constraint_fun = ActiveSetVariableModel(
benchmark.constraint_fun,
-        benchmark.variable.num_vars()+benchmark.design_variable.num_vars(),
+        benchmark.variable.nvars+benchmark.design_variable.nvars,
samples, benchmark.design_var_indices)
constraint_jac = ActiveSetVariableModel(
benchmark.constraint_jac,
-        benchmark.variable.num_vars()+benchmark.design_variable.num_vars(),
+        benchmark.variable.nvars+benchmark.design_variable.nvars,
samples, benchmark.design_var_indices)
init_guess = 2*np.ones((2, 1))
errors = pya.check_gradients(
7 changes: 7 additions & 0 deletions pyapprox/density.py
@@ -53,6 +53,13 @@ def __call__(self, samples):
return self.pdf(samples)

def num_vars(self):
+        import warnings
+        warnings.warn("Use of `num_vars()` will be deprecated. Access property `.nvars` instead",
+                      PendingDeprecationWarning)
        return self.num_dims
+
+    @property
+    def nvars(self):
+        return self.num_dims


6 changes: 3 additions & 3 deletions pyapprox/examples/adaptive_leja_interpolation.py
@@ -36,7 +36,7 @@ def genz_example(max_num_samples,precond_type):

c = np.array([10,0.00])
model = GenzFunction(
"oscillatory",variable.num_vars(),c=c,w=np.zeros_like(c))
"oscillatory",variable.nvars,c=c,w=np.zeros_like(c))
# model.set_coefficients(4,'exponential-decay')

validation_samples = generate_independent_random_samples(
@@ -51,9 +51,9 @@ def callback(pce):
num_samples.append(pce.samples.shape[1])

candidate_samples=-np.cos(
-        np.random.uniform(0,np.pi,(var_trans.num_vars(),int(1e4))))
+        np.random.uniform(0,np.pi,(var_trans.nvars,int(1e4))))
pce = AdaptiveLejaPCE(
-        var_trans.num_vars(),candidate_samples,factorization_type='fast')
+        var_trans.nvars,candidate_samples,factorization_type='fast')
if precond_type=='density':
def precond_function(basis_matrix,samples):
trans_samples = var_trans.map_from_canonical_space(samples)