From 54540841bf972cdbeea0c25c9c9b800c8636270e Mon Sep 17 00:00:00 2001 From: ConnectedSystems Date: Sun, 29 Aug 2021 16:22:29 +1000 Subject: [PATCH 1/3] Standardize to `.nvars` Implement `.nvars` as property where appropriate, and warn of pending deprecation on use of `.num_vars()` --- pyapprox/density.py | 7 +++++++ pyapprox/gaussian_network.py | 17 ++++++++++++++++- pyapprox/multivariate_gaussian.py | 23 +++++++++++++++++++++-- pyapprox/variable_transformations.py | 3 +++ pyapprox/variables.py | 10 ++++++++++ 5 files changed, 57 insertions(+), 3 deletions(-) diff --git a/pyapprox/density.py b/pyapprox/density.py index 90087832..acf56de7 100644 --- a/pyapprox/density.py +++ b/pyapprox/density.py @@ -53,6 +53,13 @@ def __call__(self, samples): return self.pdf(samples) def num_vars(self): + import warnings + warnings.warn("Use of `num_vars()` will be deprecated. Access property `.nvars` instead", + PendingDeprecationWarning) + return self.num_dims + + @property + def nvars(self): return self.num_dims diff --git a/pyapprox/gaussian_network.py b/pyapprox/gaussian_network.py index 338f6042..affbfd5b 100644 --- a/pyapprox/gaussian_network.py +++ b/pyapprox/gaussian_network.py @@ -315,7 +315,22 @@ def construct_dataless_network(self): #self.node_var_ids=[[ii] for ii in range(nnodes)] def num_vars(self): - r""" + """ + Return number of uncertain variables in the network + + Returns + ------- + nnetwork_vars : integer + The number of uncertain variables in the network + """ + import warnings + warnings.warn("Use of `num_vars()` will be deprecated. Access property `.nvars` instead", + PendingDeprecationWarning) + return self.nnetwork_vars + + @property + def nvars(self): + """ Return number of uncertain variables in the network Returns diff --git a/pyapprox/multivariate_gaussian.py b/pyapprox/multivariate_gaussian.py index da2e2569..c23a772e 100644 --- a/pyapprox/multivariate_gaussian.py +++ b/pyapprox/multivariate_gaussian.py @@ -49,6 +49,9 @@ def apply(self, vectors): raise Exception('Derived classes must implement this function') def num_vars(self): + import warnings + warnings.warn("Use of `num_vars()` will be deprecated. Access property `.nvars` instead", + PendingDeprecationWarning) raise Exception('Derived classes must implement this function') def __call__(self, vectors, transpose): @@ -103,6 +106,13 @@ def num_vars(self): r""" Return the number of variables of the multivariate Gaussian """ + import warnings + warnings.warn("Use of `num_vars()` will be deprecated. Access property `.nvars` instead", + PendingDeprecationWarning) + return self.covariance.shape[0] + + @property + def nvars(self): return self.covariance.shape[0] class CovarianceOperator(object): @@ -149,10 +159,19 @@ def __init__(self,sqrt_covariance_operator,mean=0.): self.mean=mean def num_vars(self): - r""" + """ Return the number of variables of the multivariate Gaussian """ - return self.sqrt_covariance_operator.num_vars() + import warnings + warnings.warn("Use of `num_vars()` will be deprecated. 
Access property `.nvars` instead", + PendingDeprecationWarning) + return self.sqrt_covariance_operator.nvars + + @property + def nvars(self): + """Return the number of variables of the multivariate Gaussian.""" + return self.sqrt_covariance_operator.nvars + def apply_covariance_sqrt(self, vectors, transpose): return self.sqrt_covariance_operator(vectors, transpose) diff --git a/pyapprox/variable_transformations.py b/pyapprox/variable_transformations.py index 9e4354a2..f8f92235 100644 --- a/pyapprox/variable_transformations.py +++ b/pyapprox/variable_transformations.py @@ -87,6 +87,9 @@ def map_to_canonical_space(self, samples): return samples def num_vars(self): + import warnings + warnings.warn("Use of `num_vars()` will be deprecated. Access property `.nvars` instead", + PendingDeprecationWarning) return self.nvars def map_derivatives_from_canonical_space(self, derivatives): diff --git a/pyapprox/variables.py b/pyapprox/variables.py index 2b13da48..5fe7528b 100644 --- a/pyapprox/variables.py +++ b/pyapprox/variables.py @@ -257,6 +257,9 @@ def __init__(self, unique_variables, unique_variable_indices=None, self.variable_labels = variable_labels def num_vars(self): + import warnings + warnings.warn("Use of `num_vars()` will be deprecated. Access property `.nvars` instead", + PendingDeprecationWarning) return self.nvars def all_variables(self): @@ -462,6 +465,13 @@ def __init__(self, bounds): self.bounds = bounds def num_vars(self): + import warnings + warnings.warn("Use of `num_vars()` will be deprecated. Access property `.nvars` instead", + PendingDeprecationWarning) + return len(self.bounds.lb) + + @property + def nvars(self): return len(self.bounds.lb) From f4f057a2e8b9847559626ac76802b1042ced9ee2 Mon Sep 17 00:00:00 2001 From: ConnectedSystems Date: Sun, 29 Aug 2021 16:24:23 +1000 Subject: [PATCH 2/3] Replace use of `.num_vars()` with `.nvars` --- docs/source/adaptive_leja_sequences.rst | 6 +-- .../source/polynomial_chaos_interpolation.rst | 8 ++-- examples/plot_design_under_uncertainty.py | 8 ++-- pyapprox/adaptive_sparse_grid.py | 21 +++++---- pyapprox/approximate.py | 18 ++++---- pyapprox/arbitrary_polynomial_chaos.py | 6 +-- pyapprox/bayesian_inference/laplace.py | 13 ++++-- .../bayesian_inference/tests/test_laplace.py | 4 +- ...marks.random_oscillator_rhs-342.py38.1.nbc | Bin 0 -> 15850 bytes ...chmarks.random_oscillator_rhs-342.py38.nbi | Bin 0 -> 1203 bytes pyapprox/benchmarks/test_benchmarks.py | 12 ++--- .../examples/adaptive_leja_interpolation.py | 6 +-- pyapprox/gaussian_network.py | 2 +- pyapprox/gaussian_process.py | 10 ++--- pyapprox/models/wrappers.py | 4 +- pyapprox/multivariate_gaussian.py | 10 ++--- .../multivariate_polynomials.py | 39 +++++++++------- .../tests/test_multivariate_polynomials.py | 14 +++--- pyapprox/polynomial_sampling.py | 2 +- pyapprox/probability_measure_sampling.py | 4 +- pyapprox/sensitivity_analysis.py | 2 +- pyapprox/system_analysis.py | 4 +- pyapprox/tests/test_approximate.py | 4 +- .../tests/test_control_variate_monte_carlo.py | 4 +- pyapprox/tests/test_gaussian_process.py | 2 +- pyapprox/tests/test_induced_sampling.py | 4 +- .../test_orthogonal_least_interpolation.py | 2 +- pyapprox/tests/test_sensitivity_analysis.py | 10 ++--- pyapprox/tests/test_sparse_grid.py | 7 ++- pyapprox/variable_transformations.py | 42 ++++++++++++++---- pyapprox/variables.py | 10 ++--- pyapprox/visualization.py | 12 ++--- .../tests/test_markov_chain_monte_carlo.py | 6 +-- .../plot_advection_diffusion_model.py | 2 +- .../tutorials/plot_bayesian_inference.py | 6 +-- 
.../plot_adaptive_leja_interpolation.py | 4 +- 36 files changed, 177 insertions(+), 131 deletions(-) create mode 100644 pyapprox/benchmarks/__pycache__/surrogate_benchmarks.random_oscillator_rhs-342.py38.1.nbc create mode 100644 pyapprox/benchmarks/__pycache__/surrogate_benchmarks.random_oscillator_rhs-342.py38.nbi diff --git a/docs/source/adaptive_leja_sequences.rst b/docs/source/adaptive_leja_sequences.rst index 26d030d8..08c21f6f 100644 --- a/docs/source/adaptive_leja_sequences.rst +++ b/docs/source/adaptive_leja_sequences.rst @@ -50,7 +50,7 @@ Our goal is to demonstrate how to use a polynomial chaos expansion (PCE) to appr c = np.array([10,0.01]) model = GenzFunction( - "oscillatory",variable.num_vars(),c=c,w=np.zeros_like(c)) + "oscillatory",variable.nvars,c=c,w=np.zeros_like(c)) # model.set_coefficients(4,'exponential-decay') Here we have intentionally set the coefficients :math:`c`: of the Genz function to be highly anisotropic, to emphasize the properties of the adaptive algorithm. @@ -105,9 +105,9 @@ Now we setup the adaptive algorithm. error_tol=1e-10 candidate_samples=-np.cos( - np.random.uniform(0,np.pi,(var_trans.num_vars(),int(1e4)))) + np.random.uniform(0,np.pi,(var_trans.nvars,int(1e4)))) pce = AdaptiveLejaPCE( - var_trans.num_vars(),candidate_samples,factorization_type='fast') + var_trans.nvars,candidate_samples,factorization_type='fast') max_level=np.inf max_level_1d=[max_level]*(pce.num_vars) diff --git a/docs/source/polynomial_chaos_interpolation.rst b/docs/source/polynomial_chaos_interpolation.rst index 23d65729..a0ab1279 100644 --- a/docs/source/polynomial_chaos_interpolation.rst +++ b/docs/source/polynomial_chaos_interpolation.rst @@ -41,10 +41,10 @@ Our goal is to demonstrate how to use a polynomial chaos expansion (PCE) to appr univariate_variables = [uniform(),beta(3,3)] variable = IndependentMultivariateRandomVariable(univariate_variables) - c = np.random.uniform(0.,1.,variable.num_vars()) + c = np.random.uniform(0.,1.,variable.nvars) c*=4/c.sum() w = np.zeros_like(c); w[0] = np.random.uniform(0.,1.,1) - model = GenzFunction( "oscillatory",variable.num_vars(),c=c,w=w ) + model = GenzFunction( "oscillatory",variable.nvars,c=c,w=w ) PCE represent the model output :math:`f(\V{\rv})` as an expansion in orthonormal polynomials, @@ -129,7 +129,7 @@ To set the PCE truncation to a third degree total-degree index set use close-figs degree=3 - indices = compute_hyperbolic_indices(poly.num_vars(),degree,1.0) + indices = compute_hyperbolic_indices(poly.nvars,degree,1.0) poly.set_indices(indices) Now we have defined the PCE, we are now must compute its coefficients. Pyapprox supports a number of methods to compute the polynomial coefficients. Here we will use interpolation. Specifically we evaluate the function at a set of samples :math:`\mathcal{Z}=[\V{\rv}^{(1)},\ldots,\V{\rv}^{(M)}]` to obtain a set of function values :math:`\V{f}=[\V{f}^{(1)},\ldots,\V{f}^{(M)}]^T`. 
The function may be vectored valued and thus each :math:`\V{f}^{(i)}\in\mathbb{R}^Q` is a vector and :math:`\V{F}\in\mathbb{R}^{M\times Q}` is a matrix @@ -149,7 +149,7 @@ Sampling from this measure is asymptorically optimal (as degree increases) for a close-figs ntrain_samples = int(poly.indices.shape[1]*1.1) - train_samples = -np.cos(np.random.uniform(0,2*np.pi,(poly.num_vars(),ntrain_samples))) + train_samples = -np.cos(np.random.uniform(0,2*np.pi,(poly.nvars,ntrain_samples))) train_samples = var_trans.map_from_canonical_space(train_samples) train_values = model(train_samples) diff --git a/examples/plot_design_under_uncertainty.py b/examples/plot_design_under_uncertainty.py index 2aa65e7d..85e2b887 100644 --- a/examples/plot_design_under_uncertainty.py +++ b/examples/plot_design_under_uncertainty.py @@ -35,10 +35,10 @@ nsamples = 10 samples = pya.generate_independent_random_samples(benchmark.variable,nsamples) fun = ActiveSetVariableModel( - benchmark.fun,benchmark.variable.num_vars()+benchmark.design_variable.num_vars(), + benchmark.fun,benchmark.variable.nvars+benchmark.design_variable.nvars, samples,benchmark.design_var_indices) jac = ActiveSetVariableModel( - benchmark.jac,benchmark.variable.num_vars()+benchmark.design_variable.num_vars(), + benchmark.jac,benchmark.variable.nvars+benchmark.design_variable.nvars, samples,benchmark.design_var_indices) nsamples = 10000 @@ -49,9 +49,9 @@ seed=1 generate_sample_data = partial( generate_monte_carlo_quadrature_data,generate_random_samples, - benchmark.variable.num_vars(),benchmark.design_var_indices,seed=seed) + benchmark.variable.nvars,benchmark.design_var_indices,seed=seed) -num_vars = benchmark.variable.num_vars()+benchmark.design_variable.num_vars() +num_vars = benchmark.variable.nvars+benchmark.design_variable.nvars objective = StatisticalConstraint( benchmark.fun,benchmark.jac,expectation_fun,expectation_jac,num_vars, benchmark.design_var_indices,generate_sample_data,isobjective=True) diff --git a/pyapprox/adaptive_sparse_grid.py b/pyapprox/adaptive_sparse_grid.py index 0c8dd357..acf478c8 100644 --- a/pyapprox/adaptive_sparse_grid.py +++ b/pyapprox/adaptive_sparse_grid.py @@ -577,19 +577,19 @@ def convert_sparse_grid_to_polynomial_chaos_expansion(sparse_grid, pce_opts, pce = PolynomialChaosExpansion() pce.configure(pce_opts) if sparse_grid.config_variables_idx is not None: - assert pce.num_vars() == sparse_grid.config_variables_idx + assert pce.nvars == sparse_grid.config_variables_idx else: - assert pce.num_vars() == sparse_grid.num_vars + assert pce.nvars == sparse_grid.num_vars def get_recursion_coefficients(N, dd): - pce.update_recursion_coefficients([N]*pce.num_vars()) + pce.update_recursion_coefficients([N]*pce.nvars) return pce.recursion_coeffs[pce.basis_type_index_map[dd]].copy() coeffs_1d = [ convert_univariate_lagrange_basis_to_orthonormal_polynomials( sparse_grid.samples_1d[dd], partial(get_recursion_coefficients, dd=dd)) - for dd in range(pce.num_vars())] + for dd in range(pce.nvars)] indices_list = [] coeffs_list = [] @@ -1070,10 +1070,10 @@ def set_config_variable_index(self, idx, config_var_trans=None): self.config_var_trans = config_var_trans self.num_config_vars = self.num_vars-self.config_variables_idx if self.variable_transformation is not None: - assert (self.variable_transformation.num_vars() == + assert (self.variable_transformation.nvars == self.config_variables_idx) if self.config_var_trans is not None: - assert self.num_config_vars == self.config_var_trans.num_vars() + assert self.num_config_vars == 
self.config_var_trans.nvars def eval_cost_function(self, samples): config_samples = self.map_config_samples_from_canonical_space( @@ -1255,9 +1255,9 @@ def get_sparse_grid_univariate_leja_quadrature_rules( unique_max_level_1d = \ get_sparse_grid_univariate_leja_quadrature_rules_economical( var_trans, growth_rules=None) - quad_rules = [None for ii in var_trans.num_vars()] - growth_rules = [None for ii in var_trans.num_vars()] - max_level_1d = [None for ii in var_trans.num_vars()] + quad_rules = [None for ii in var_trans.nvars] + growth_rules = [None for ii in var_trans.nvars] + max_level_1d = [None for ii in var_trans.nvars] for quad_rule, growth_rule, indices, max_level in zip( unique_quad_rules, unique_growth_rules, unique_quadrule_indices, unique_max_level_1d): @@ -1632,6 +1632,9 @@ def num_vars(self): ------- The number of configure variables """ + import warnings + warnings.warn("Use of `num_vars()` will be deprecated. Access property `.nvars` instead", + PendingDeprecationWarning) return self.nvars diff --git a/pyapprox/approximate.py b/pyapprox/approximate.py index a66cfb09..0263ff2e 100644 --- a/pyapprox/approximate.py +++ b/pyapprox/approximate.py @@ -175,9 +175,9 @@ def adaptive_approximate_sparse_grid( The sparse grid approximation """ var_trans = AffineRandomVariableTransformation(variables) - nvars = var_trans.num_vars() + nvars = var_trans.nvars if config_var_trans is not None: - nvars += config_var_trans.num_vars() + nvars += config_var_trans.nvars sparse_grid = CombinationSparseGrid(nvars) if max_level_1d is None: @@ -255,7 +255,7 @@ def __initialize_leja_pce( else: break - nvars = variables.num_vars() + nvars = variables.nvars if generate_candidate_samples is None: # Todo implement default for non-bounded variables that uses induced # sampling @@ -290,7 +290,7 @@ def __setup_adaptive_pce(pce, verbose, fun, var_trans, growth_rules, refinement_indicator, admissibility_function, growth_rules, unique_quadrule_indices=unique_quadrule_indices) - nvars = var_trans.num_vars() + nvars = var_trans.nvars if max_level_1d is None: max_level_1d = [np.inf]*nvars elif np.isscalar(max_level_1d): @@ -408,7 +408,7 @@ def adaptive_approximate_polynomial_chaos_induced( var_trans = AffineRandomVariableTransformation(variables) pce = AdaptiveInducedPCE( - var_trans.num_vars(), induced_sampling=induced_sampling, + var_trans.nvars, induced_sampling=induced_sampling, cond_tol=cond_tol, fit_opts=fit_opts) __setup_adaptive_pce(pce, verbose, fun, var_trans, growth_rules, @@ -657,7 +657,7 @@ def adaptive_approximate_gaussian_process( """ assert max_nsamples <= ncandidate_samples - nvars = variables.num_vars() + nvars = variables.nvars if normalize_inputs: var_trans = AffineRandomVariableTransformation(variables) @@ -1231,7 +1231,7 @@ def _cross_validate_pce_degree( rng_state = np.random.get_state() for degree in range(min_degree, max_degree+1): indices = compute_hyperbolic_indices( - pce.num_vars(), degree, hcross_strength) + pce.nvars, degree, hcross_strength) pce.set_indices(indices) if ((pce.num_terms() > 100000) and (100000-prev_num_terms < pce.num_terms()-100000)): @@ -1258,7 +1258,7 @@ def _cross_validate_pce_degree( prev_num_terms = pce.num_terms() pce.set_indices(compute_hyperbolic_indices( - pce.num_vars(), best_degree, hcross_strength)) + pce.nvars, best_degree, hcross_strength)) pce.set_coefficients(best_coef) if verbose > 0: print('best degree:', best_degree) @@ -1430,7 +1430,7 @@ def _expanding_basis_pce(pce, train_samples, train_vals, hcross_strength=1, max_iters=20, 
max_num_step_increases=1): assert train_vals.shape[1] == 1 - num_vars = pce.num_vars() + num_vars = pce.nvars if max_num_init_terms is None: max_num_init_terms = train_vals.shape[0] diff --git a/pyapprox/arbitrary_polynomial_chaos.py b/pyapprox/arbitrary_polynomial_chaos.py index 24f51b47..1b9ec8f6 100644 --- a/pyapprox/arbitrary_polynomial_chaos.py +++ b/pyapprox/arbitrary_polynomial_chaos.py @@ -76,7 +76,7 @@ def unrotated_canonical_basis_matrix(self, canonical_samples): def unrotated_basis_matrix(self, samples): assert samples.ndim == 2 - assert samples.shape[0] == self.num_vars() + assert samples.shape[0] == self.nvars canonical_samples = self.var_trans.map_to_canonical_space( samples) matrix = self.unrotated_canonical_basis_matrix(canonical_samples) @@ -98,7 +98,7 @@ def canonical_basis_matrix(self, canonical_samples, opts=dict()): def basis_matrix(self, samples, opts=dict()): assert samples.ndim == 2 - assert samples.shape[0] == self.num_vars() + assert samples.shape[0] == self.nvars canonical_samples = self.var_trans.map_to_canonical_space( samples) return self.canonical_basis_matrix(canonical_samples, opts) @@ -349,7 +349,7 @@ def compute_grammian_matrix_using_combination_sparse_grid( basis_matrix_function, dummy_indices, var_trans, max_num_samples, error_tol=0, density_function=None, quad_rule_opts=None): - num_vars = var_trans.num_vars() + num_vars = var_trans.nvars sparse_grid = CombinationSparseGrid(num_vars) admissibility_function = partial( max_level_admissibility_function, np.inf, [np.inf]*num_vars, diff --git a/pyapprox/bayesian_inference/laplace.py b/pyapprox/bayesian_inference/laplace.py index 53a01557..b874054a 100644 --- a/pyapprox/bayesian_inference/laplace.py +++ b/pyapprox/bayesian_inference/laplace.py @@ -43,10 +43,10 @@ def apply(self, vectors, transpose=None): return z def num_rows(self): - return self.prior_covariance_sqrt_operator.num_vars() + return self.prior_covariance_sqrt_operator.nvars def num_cols(self): - return self.prior_covariance_sqrt_operator.num_vars() + return self.prior_covariance_sqrt_operator.nvars class LaplaceSqrtMatVecOperator(object): r""" @@ -88,7 +88,14 @@ def __init__(self, prior_covariance_sqrt_operator, e_r=None, V_r=None, self.set_eigenvalues(e_r) def num_vars(self): - return self.prior_covariance_sqrt_operator.num_vars() + import warnings + warnings.warn("Use of `num_vars()` will be deprecated. 
Access property `.nvars` instead",
+                      PendingDeprecationWarning)
+        return self.prior_covariance_sqrt_operator.nvars
+
+    @property
+    def nvars(self):
+        return self.prior_covariance_sqrt_operator.nvars
 
     def set_eigenvalues(self,e_r):
         self.diagonal = np.sqrt(1./(e_r+1.))-1
diff --git a/pyapprox/bayesian_inference/tests/test_laplace.py b/pyapprox/bayesian_inference/tests/test_laplace.py
index e71784fc..1c7ca26b 100644
--- a/pyapprox/bayesian_inference/tests/test_laplace.py
+++ b/pyapprox/bayesian_inference/tests/test_laplace.py
@@ -216,7 +216,7 @@ def posterior_covariance_helper(prior, rank, comparison_tol,
 
     # Extract prior information required for computing exact posterior
     # mean and covariance
-    num_vars = prior.num_vars()
+    num_vars = prior.nvars
     prior_mean = np.zeros((num_vars),float)
     L = L_op(np.eye(num_vars),False)
     L_T = L_op(np.eye(num_vars),True)
@@ -715,7 +715,7 @@ def help_generate_and_save_laplace_posterior(
     # map
     misfit_model.map_point = lambda : exact_laplace_mean
 
-    num_singular_values = prior.num_vars()
+    num_singular_values = prior.nvars
     try:
         L_post_op = generate_and_save_laplace_posterior(
             prior,misfit_model,num_singular_values,
diff --git a/pyapprox/benchmarks/__pycache__/surrogate_benchmarks.random_oscillator_rhs-342.py38.1.nbc b/pyapprox/benchmarks/__pycache__/surrogate_benchmarks.random_oscillator_rhs-342.py38.1.nbc
new file mode 100644
index 0000000000000000000000000000000000000000..49cfececa6da8e53a51a726e41abd5169cab7f8b
GIT binary patch
literal 15850
[15850-byte base85 binary payload omitted: compiled numba cache file]

diff --git a/pyapprox/benchmarks/__pycache__/surrogate_benchmarks.random_oscillator_rhs-342.py38.nbi b/pyapprox/benchmarks/__pycache__/surrogate_benchmarks.random_oscillator_rhs-342.py38.nbi
new file mode 100644
index 0000000000000000000000000000000000000000..1b45dfe41186a9350dc7d8328f6e55ee83af2bff
GIT binary patch
literal 1203
[1203-byte base85 binary payload omitted: compiled numba cache file]

diff --git a/pyapprox/benchmarks/test_benchmarks.py b/pyapprox/benchmarks/test_benchmarks.py
index 887fa9ae..2b44dfc2 100644
--- a/pyapprox/benchmarks/test_benchmarks.py
+++ b/pyapprox/benchmarks/test_benchmarks.py
@@ -46,12 +46,12 @@ def test_cantilever_beam_gradients(self):
         from pyapprox.models.wrappers import ActiveSetVariableModel
         fun = ActiveSetVariableModel(
             benchmark.fun,
-            benchmark.variable.num_vars()+benchmark.design_variable.num_vars(),
+            benchmark.variable.nvars+benchmark.design_variable.nvars,
             benchmark.variable.get_statistics('mean'),
             benchmark.design_var_indices)
         jac = ActiveSetVariableModel(
             benchmark.jac,
-            benchmark.variable.num_vars()+benchmark.design_variable.num_vars(),
+            benchmark.variable.nvars+benchmark.design_variable.nvars,
             benchmark.variable.get_statistics('mean'),
             benchmark.design_var_indices)
         init_guess = 2*np.ones((2, 1))
@@ -61,12 +61,12 @@ def test_cantilever_beam_gradients(self):
 
         constraint_fun = ActiveSetVariableModel(
             benchmark.constraint_fun,
-            benchmark.variable.num_vars()+benchmark.design_variable.num_vars(),
+            benchmark.variable.nvars+benchmark.design_variable.nvars,
             benchmark.variable.get_statistics('mean'),
             benchmark.design_var_indices)
         constraint_jac = ActiveSetVariableModel(
             benchmark.constraint_jac,
-            benchmark.variable.num_vars()+benchmark.design_variable.num_vars(),
+            benchmark.variable.nvars+benchmark.design_variable.nvars,
             benchmark.variable.get_statistics('mean'),
             benchmark.design_var_indices)
         init_guess = 2*np.ones((2, 1))
@@ -79,11 +79,11 @@ def test_cantilever_beam_gradients(self):
             benchmark.variable, nsamples)
         constraint_fun = 
ActiveSetVariableModel( benchmark.constraint_fun, - benchmark.variable.num_vars()+benchmark.design_variable.num_vars(), + benchmark.variable.nvars+benchmark.design_variable.nvars, samples, benchmark.design_var_indices) constraint_jac = ActiveSetVariableModel( benchmark.constraint_jac, - benchmark.variable.num_vars()+benchmark.design_variable.num_vars(), + benchmark.variable.nvars+benchmark.design_variable.nvars, samples, benchmark.design_var_indices) init_guess = 2*np.ones((2, 1)) errors = pya.check_gradients( diff --git a/pyapprox/examples/adaptive_leja_interpolation.py b/pyapprox/examples/adaptive_leja_interpolation.py index dd7afdbd..ac32e113 100644 --- a/pyapprox/examples/adaptive_leja_interpolation.py +++ b/pyapprox/examples/adaptive_leja_interpolation.py @@ -36,7 +36,7 @@ def genz_example(max_num_samples,precond_type): c = np.array([10,0.00]) model = GenzFunction( - "oscillatory",variable.num_vars(),c=c,w=np.zeros_like(c)) + "oscillatory",variable.nvars,c=c,w=np.zeros_like(c)) # model.set_coefficients(4,'exponential-decay') validation_samples = generate_independent_random_samples( @@ -51,9 +51,9 @@ def callback(pce): num_samples.append(pce.samples.shape[1]) candidate_samples=-np.cos( - np.random.uniform(0,np.pi,(var_trans.num_vars(),int(1e4)))) + np.random.uniform(0,np.pi,(var_trans.nvars,int(1e4)))) pce = AdaptiveLejaPCE( - var_trans.num_vars(),candidate_samples,factorization_type='fast') + var_trans.nvars,candidate_samples,factorization_type='fast') if precond_type=='density': def precond_function(basis_matrix,samples): trans_samples = var_trans.map_from_canonical_space(samples) diff --git a/pyapprox/gaussian_network.py b/pyapprox/gaussian_network.py index affbfd5b..4fefde8e 100644 --- a/pyapprox/gaussian_network.py +++ b/pyapprox/gaussian_network.py @@ -815,7 +815,7 @@ def get_total_degree_polynomials(univariate_variables,degrees): var_trans = AffineRandomVariableTransformation(univariate_variables[ii]) poly_opts = define_poly_options_from_variable_transformation(var_trans) poly.configure(poly_opts) - indices=compute_hyperbolic_indices(var_trans.num_vars(),degrees[ii],1.0) + indices=compute_hyperbolic_indices(var_trans.nvars,degrees[ii],1.0) poly.set_indices(indices) polys.append(poly) nparams.append(indices.shape[1]) diff --git a/pyapprox/gaussian_process.py b/pyapprox/gaussian_process.py index c22a5dd6..df1478f1 100644 --- a/pyapprox/gaussian_process.py +++ b/pyapprox/gaussian_process.py @@ -623,7 +623,7 @@ def integrate_xi_1(xx_1d, ww_1d, lscale_ii): def get_gaussian_process_squared_exponential_kernel_1d_integrals( X_train, length_scale, variable, transform_quad_rules, nquad_samples=50, skip_xi_1=False): - nvars = variable.num_vars() + nvars = variable.nvars degrees = [nquad_samples]*nvars univariate_quad_rules, pce = get_univariate_quadrature_rules_from_variable( variable, degrees) @@ -1807,7 +1807,7 @@ def get_univariate_quadrature_rule(self, ii): return xx_1d, ww_1d def precompute_gauss_quadrature(self): - nvars = self.variables.num_vars() + nvars = self.variables.nvars length_scale = self.kernel.length_scale if np.isscalar(length_scale): length_scale = [length_scale]*nvars @@ -2392,7 +2392,7 @@ def marginalize_gaussian_process(gp, variable, center=True): kernel_var /= float(gp._y_train_std**2) length_scale = np.atleast_1d(kernel_length_scale) - nvars = variable.num_vars() + nvars = variable.nvars marginalized_gps = [] for ii in range(nvars): tau = np.prod(np.array(tau_list)[:ii], axis=0)*np.prod( @@ -2455,7 +2455,7 @@ def _compute_expected_sobol_indices( 
nquad_samples=nquad_samples, skip_xi_1=True) # ntrain_samples = x_train.shape[1] - nvars = variable.num_vars() + nvars = variable.nvars degrees = [nquad_samples]*nvars univariate_quad_rules, pce = get_univariate_quadrature_rules_from_variable( variable, degrees) @@ -2562,7 +2562,7 @@ def generate_gp_realizations(gp, ngp_realizations, ninterpolation_samples, generate_random_samples = None from pyapprox.gaussian_process import generate_gp_candidate_samples candidate_samples = generate_gp_candidate_samples( - variable.num_vars(), ncandidate_samples, generate_random_samples, + variable.nvars, ncandidate_samples, generate_random_samples, variable) gp_realizations.fit( candidate_samples, rand_noise, ninterpolation_samples, diff --git a/pyapprox/models/wrappers.py b/pyapprox/models/wrappers.py index 5efa1180..888aae7e 100644 --- a/pyapprox/models/wrappers.py +++ b/pyapprox/models/wrappers.py @@ -631,11 +631,11 @@ def get_active_set_model_from_variable(function, variable, active_var_indices, from pyapprox import IndependentMultivariateRandomVariable active_variable = IndependentMultivariateRandomVariable( [variable.all_variables()[ii] for ii in active_var_indices]) - mask = np.ones(variable.num_vars(), dtype=bool) + mask = np.ones(variable.nvars, dtype=bool) mask[active_var_indices] = False inactive_var_values = nominal_values[mask] model = ActiveSetVariableModel( - function, variable.num_vars(), inactive_var_values, active_var_indices) + function, variable.nvars, inactive_var_values, active_var_indices) return model, active_variable diff --git a/pyapprox/multivariate_gaussian.py b/pyapprox/multivariate_gaussian.py index c23a772e..a99ea4a5 100644 --- a/pyapprox/multivariate_gaussian.py +++ b/pyapprox/multivariate_gaussian.py @@ -144,7 +144,7 @@ def __call__(self, vectors, transpose): return self.apply(vectors, None) def num_rows(self): - return self.sqrt_covariance_operator.num_vars() + return self.sqrt_covariance_operator.nvars def num_cols(self): return self.num_rows() @@ -154,8 +154,8 @@ class MultivariateGaussian(object): def __init__(self,sqrt_covariance_operator,mean=0.): self.sqrt_covariance_operator=sqrt_covariance_operator if np.isscalar(mean): - mean = mean*np.ones((self.num_vars())) - assert mean.ndim==1 and mean.shape[0]==self.num_vars() + mean = mean*np.ones((self.nvars)) + assert mean.ndim==1 and mean.shape[0]==self.nvars self.mean=mean def num_vars(self): @@ -177,7 +177,7 @@ def apply_covariance_sqrt(self, vectors, transpose): return self.sqrt_covariance_operator(vectors, transpose) def generate_samples(self,nsamples): - std_normal_samples = np.random.normal(0.,1.,(self.num_vars(),nsamples)) + std_normal_samples = np.random.normal(0.,1.,(self.nvars,nsamples)) samples = self.apply_covariance_sqrt(std_normal_samples,False) samples += self.mean[:,np.newaxis] return samples @@ -189,7 +189,7 @@ def pointwise_variance(self,active_indices=None): """ covariance_operator=CovarianceOperator(self.sqrt_covariance_operator) return get_operator_diagonal( - covariance_operator,self.num_vars(), + covariance_operator,self.nvars, self.sqrt_covariance_operator.eval_concurrency, active_indices=active_indices) diff --git a/pyapprox/polynomial_chaos/multivariate_polynomials.py b/pyapprox/polynomial_chaos/multivariate_polynomials.py index d5d6e1b1..def31d37 100644 --- a/pyapprox/polynomial_chaos/multivariate_polynomials.py +++ b/pyapprox/polynomial_chaos/multivariate_polynomials.py @@ -257,7 +257,7 @@ def __sub__(self, other): def __pow__(self, order): poly = 
get_polynomial_from_variable(self.var_trans.variable) if order == 0: - poly.set_indices(np.zeros([self.num_vars(), 1], dtype=int)) + poly.set_indices(np.zeros([self.nvars, 1], dtype=int)) poly.set_coefficients(np.ones([1, self.coefficients.shape[1]])) return poly @@ -304,14 +304,14 @@ def configure(self, opts): """ self.var_trans = opts["var_trans"] self.config_opts = opts - self.max_degree = -np.ones(self.num_vars(), dtype=int) + self.max_degree = -np.ones(self.nvars, dtype=int) def update_recursion_coefficients(self, num_coefs_per_var): num_coefs_per_var = np.atleast_1d(num_coefs_per_var) initializing = False if self.basis_type_index_map is None: initializing = True - self.basis_type_index_map = np.zeros((self.num_vars()), dtype=int) + self.basis_type_index_map = np.zeros((self.nvars), dtype=int) ii = 0 for key, poly_opts in self.config_opts['poly_types'].items(): if (initializing or ( @@ -337,7 +337,7 @@ def update_recursion_coefficients(self, num_coefs_per_var): self.basis_type_index_map[self.basis_type_var_indices[ii]] = ii ii += 1 if (np.unique(np.hstack(self.basis_type_var_indices)).shape[0] != - self.num_vars()): + self.nvars): msg = 'poly_opts does not specify a basis for each input ' msg += 'variable' raise ValueError(msg) @@ -348,7 +348,7 @@ def set_indices(self, indices): indices = indices.reshape((1, indices.shape[0])) self.indices = indices - assert indices.shape[0] == self.num_vars() + assert indices.shape[0] == self.nvars max_degree = indices.max(axis=1) if np.any(self.max_degree < max_degree): self.update_recursion_coefficients(max_degree+1) @@ -356,7 +356,7 @@ def set_indices(self, indices): def basis_matrix(self, samples, opts=dict()): assert samples.ndim == 2 - assert samples.shape[0] == self.num_vars() + assert samples.shape[0] == self.nvars canonical_samples = self.var_trans.map_to_canonical_space( samples) basis_matrix = self.canonical_basis_matrix(canonical_samples, opts) @@ -403,7 +403,14 @@ def value(self, samples): return np.dot(basis_matrix, self.coefficients) def num_vars(self): - return self.var_trans.num_vars() + import warnings + warnings.warn("Use of `num_vars()` will be deprecated. 
Access property `.nvars` instead", + PendingDeprecationWarning) + return self.var_trans.nvars + + @property + def nvars(self): + return self.var_trans.nvars def __call__(self, samples): return self.value(samples) @@ -452,7 +459,7 @@ def num_terms(self): def get_univariate_quadrature_rules_from_pce(pce, degrees): - num_vars = pce.num_vars() + num_vars = pce.nvars degrees = np.atleast_1d(degrees) if degrees.shape[0] == 1 and num_vars > 1: degrees = np.array([degrees[0]]*num_vars) @@ -483,11 +490,11 @@ def get_univariate_quadrature_rules_from_pce(pce, degrees): def get_univariate_quadrature_rules_from_variable(variable, degrees): - assert len(degrees) == variable.num_vars() + assert len(degrees) == variable.nvars pce = get_polynomial_from_variable(variable) indices = [] - for ii in range(pce.num_vars()): - indices_ii = np.zeros((pce.num_vars(), degrees[ii]+1), dtype=int) + for ii in range(pce.nvars): + indices_ii = np.zeros((pce.nvars, degrees[ii]+1), dtype=int) indices_ii[ii, :] = np.arange(degrees[ii]+1, dtype=int) indices.append(indices_ii) pce.set_indices(np.hstack(indices)) @@ -501,7 +508,7 @@ def get_tensor_product_quadrature_rule_from_pce(pce, degrees): pce, degrees) canonical_samples, weights = \ get_tensor_product_quadrature_rule( - degrees+1, pce.num_vars(), univariate_quadrature_rules) + degrees+1, pce.nvars, univariate_quadrature_rules) samples = pce.var_trans.map_from_canonical_space( canonical_samples) return samples, weights @@ -567,7 +574,7 @@ def conditional_moments_of_polynomial_chaos_expansion( poly.recursion_coeffs[poly.basis_type_index_map[inactive_idx[dd]]]) basis_vals_1d.append(basis_vals_1d_dd) - active_idx = np.setdiff1d(np.arange(poly.num_vars()), inactive_idx) + active_idx = np.setdiff1d(np.arange(poly.nvars), inactive_idx) mean = coef[0].copy() for ii in range(1, indices.shape[1]): index = indices[:, ii] @@ -600,12 +607,12 @@ def marginalize_polynomial_chaos_expansion(poly, inactive_idx, center=True): # poly.config_opts.copy will not work opts = copy.deepcopy(poly.config_opts) all_variables = poly.var_trans.variable.all_variables() - active_idx = np.setdiff1d(np.arange(poly.num_vars()), inactive_idx) + active_idx = np.setdiff1d(np.arange(poly.nvars), inactive_idx) active_variables = IndependentMultivariateRandomVariable( [all_variables[ii] for ii in active_idx]) opts['var_trans'] = AffineRandomVariableTransformation(active_variables) - marginalized_var_nums = -np.ones(poly.num_vars()) + marginalized_var_nums = -np.ones(poly.nvars) marginalized_var_nums[active_idx] = np.arange(active_idx.shape[0]) keys_to_delete = [] for key, poly_opts in opts['poly_types'].items(): @@ -679,7 +686,7 @@ def compute_product_coeffs_1d_for_each_variable( poly, max_degrees1, max_degrees2): # must ensure that poly1 and poly2 have the same basis types # in each dimension - num_vars = poly.num_vars() + num_vars = poly.nvars def get_recursion_coefficients(N, dd): poly.update_recursion_coefficients([N]*num_vars) diff --git a/pyapprox/polynomial_chaos/tests/test_multivariate_polynomials.py b/pyapprox/polynomial_chaos/tests/test_multivariate_polynomials.py index ed405ae3..7b1fce9e 100644 --- a/pyapprox/polynomial_chaos/tests/test_multivariate_polynomials.py +++ b/pyapprox/polynomial_chaos/tests/test_multivariate_polynomials.py @@ -695,12 +695,12 @@ def test_multiply_multivariate_orthonormal_polynomial_expansions(self): degree1, degree2 = 3, 2 poly1 = get_polynomial_from_variable(variable) poly1.set_indices(compute_hyperbolic_indices( - variable.num_vars(), degree1)) + variable.nvars, degree1)) 
+            variable.nvars, degree1))
poly1.set_coefficients(np.random.normal( 0, 1, (poly1.indices.shape[1], 1))) poly2 = get_polynomial_from_variable(variable) poly2.set_indices(compute_hyperbolic_indices( - variable.num_vars(), degree2)) + variable.nvars, degree2)) poly2.set_coefficients(np.random.normal( 0, 1, (poly2.indices.shape[1], 1))) @@ -732,10 +732,10 @@ def test_multiply_pce(self): degree1, degree2 = 1, 2 poly1 = get_polynomial_from_variable(variable) poly1.set_indices(compute_hyperbolic_indices( - variable.num_vars(), degree1)) + variable.nvars, degree1)) poly2 = get_polynomial_from_variable(variable) poly2.set_indices(compute_hyperbolic_indices( - variable.num_vars(), degree2)) + variable.nvars, degree2)) # coef1 = np.random.normal(0,1,(poly1.indices.shape[1],1)) # coef2 = np.random.normal(0,1,(poly2.indices.shape[1],1)) @@ -759,12 +759,12 @@ def test_add_pce(self): degree1, degree2 = 2, 3 poly1 = get_polynomial_from_variable(variable) poly1.set_indices(compute_hyperbolic_indices( - variable.num_vars(), degree1)) + variable.nvars, degree1)) poly1.set_coefficients(np.random.normal( 0, 1, (poly1.indices.shape[1], 1))) poly2 = get_polynomial_from_variable(variable) poly2.set_indices(compute_hyperbolic_indices( - variable.num_vars(), degree2)) + variable.nvars, degree2)) poly2.set_coefficients(np.random.normal( 0, 1, (poly2.indices.shape[1], 1))) @@ -958,7 +958,7 @@ def function(x): return (x.T)**2 var_trans) pce.configure(pce_opts) pce.set_indices( - compute_hyperbolic_indices(var_trans.num_vars(), degree, 1.)) + compute_hyperbolic_indices(var_trans.nvars, degree, 1.)) nsamples = int(1e6) samples = lognorm.rvs(nsamples)[None, :] diff --git a/pyapprox/polynomial_sampling.py b/pyapprox/polynomial_sampling.py index adc1c1d6..f4c8ca9a 100644 --- a/pyapprox/polynomial_sampling.py +++ b/pyapprox/polynomial_sampling.py @@ -268,7 +268,7 @@ def get_oli_leja_samples(pce, generate_candidate_samples, oli_solver.set_basis_generator(basis_generator) - num_vars = pce.num_vars() + num_vars = pce.nvars max_degree = get_total_degree(num_vars, num_leja_samples) indices = compute_hyperbolic_indices(num_vars, max_degree, 1.) 
# warning this assumes basis generator is always compute_hyperbolic_indices diff --git a/pyapprox/probability_measure_sampling.py b/pyapprox/probability_measure_sampling.py index a7ca0e53..101ed180 100644 --- a/pyapprox/probability_measure_sampling.py +++ b/pyapprox/probability_measure_sampling.py @@ -135,7 +135,7 @@ def generate_independent_random_samples_deprecated(var_trans, num_samples): """ assert type(var_trans) == AffineRandomVariableTransformation, \ "`var_trans` must be of AffineRandomVariableTransformation type" - num_vars = var_trans.num_vars() + num_vars = var_trans.nvars canonical_samples = np.empty((num_vars, num_samples), dtype=float) variables = var_trans.variables @@ -170,7 +170,7 @@ def generate_independent_random_samples(variable, num_samples, """ assert type(variable) == IndependentMultivariateRandomVariable, \ "`variable` must be of IndependentMultivariateRandomVariable type" - num_vars = variable.num_vars() + num_vars = variable.nvars num_samples = int(num_samples) samples = np.empty((num_vars, num_samples), dtype=float) for ii in range(variable.nunique_vars): diff --git a/pyapprox/sensitivity_analysis.py b/pyapprox/sensitivity_analysis.py index b378307e..dc21ef63 100644 --- a/pyapprox/sensitivity_analysis.py +++ b/pyapprox/sensitivity_analysis.py @@ -708,7 +708,7 @@ def get_AB_sample_sets_for_sobol_sensitivity_analysis( samplesA = generate_independent_random_samples(variables, nsamples) samplesB = generate_independent_random_samples(variables, nsamples) elif method == 'halton' or 'sobol': - nvars = variables.num_vars() + nvars = variables.nvars if method == 'halton': qmc_samples = halton_sequence( 2*nvars, qmc_start_index, qmc_start_index+nsamples) diff --git a/pyapprox/system_analysis.py b/pyapprox/system_analysis.py index 06e14a2b..9d37f436 100644 --- a/pyapprox/system_analysis.py +++ b/pyapprox/system_analysis.py @@ -218,9 +218,9 @@ def initialize_surrogate( variables, enforce_variable_bounds, univariate_quad_rule_info, quad_method, growth_incr) - nvars = var_trans.num_vars() + nvars = var_trans.nvars if config_var_trans is not None: - nvars += config_var_trans.num_vars() + nvars += config_var_trans.nvars if max_level_1d is None: max_level_1d = [np.inf]*nvars diff --git a/pyapprox/tests/test_approximate.py b/pyapprox/tests/test_approximate.py index 86b926e9..f6698d64 100644 --- a/pyapprox/tests/test_approximate.py +++ b/pyapprox/tests/test_approximate.py @@ -137,7 +137,7 @@ def test_approximate_polynomial_chaos_induced(self): def test_approximate_polynomial_chaos_custom_poly_type(self): benchmark = setup_benchmark("ishigami", a=7, b=0.1) - nvars = benchmark.variable.num_vars() + nvars = benchmark.variable.nvars # this test purposefully select wrong variable to make sure # poly_type overide is activated univariate_variables = [stats.beta(5, 5, -np.pi, 2*np.pi)]*nvars @@ -507,7 +507,7 @@ def test_cross_validate_approximation_after_regularization_selection(self): def test_approximate_neural_network(self): np.random.seed(2) benchmark = setup_benchmark("ishigami", a=7, b=0.1) - nvars = benchmark.variable.num_vars() + nvars = benchmark.variable.nvars nqoi = 1 maxiter = 30000 print(benchmark.variable) diff --git a/pyapprox/tests/test_control_variate_monte_carlo.py b/pyapprox/tests/test_control_variate_monte_carlo.py index 7c6ee7f5..c7032dd1 100644 --- a/pyapprox/tests/test_control_variate_monte_carlo.py +++ b/pyapprox/tests/test_control_variate_monte_carlo.py @@ -180,7 +180,7 @@ def m4(self,samples): return (1 - M/(b*(h**2)*Y) - (P*(1 + M)/(h*Y))**2)[:,np.newaxis] def 
get_quadrature_rule(self): - nvars = self.variable.num_vars() + nvars = self.variable.nvars degrees=[10]*nvars var_trans = pya.AffineRandomVariableTransformation(self.variable) gauss_legendre = partial( @@ -189,7 +189,7 @@ def get_quadrature_rule(self): gauss_legendre,gauss_legendre,pya.gauss_hermite_pts_wts_1D, pya.gauss_hermite_pts_wts_1D,pya.gauss_hermite_pts_wts_1D] x,w = pya.get_tensor_product_quadrature_rule( - degrees,self.variable.num_vars(),univariate_quadrature_rules, + degrees,self.variable.nvars,univariate_quadrature_rules, var_trans.map_from_canonical_space) return x,w diff --git a/pyapprox/tests/test_gaussian_process.py b/pyapprox/tests/test_gaussian_process.py index 8bf6200c..646b3896 100644 --- a/pyapprox/tests/test_gaussian_process.py +++ b/pyapprox/tests/test_gaussian_process.py @@ -870,7 +870,7 @@ def test_generate_gp_realizations(self): # [stats.uniform(lb, ub-lb)])#, stats.uniform(1e4, 1e5-1e4)]) # length_scale = (ub-lb)/10 - nvars = variable.num_vars() + nvars = variable.nvars fkernel = Matern(length_scale, length_scale_bounds='fixed', nu=np.inf) fkernel = ConstantKernel( diff --git a/pyapprox/tests/test_induced_sampling.py b/pyapprox/tests/test_induced_sampling.py index 308b3b7d..ba421e23 100644 --- a/pyapprox/tests/test_induced_sampling.py +++ b/pyapprox/tests/test_induced_sampling.py @@ -90,7 +90,7 @@ def help_discrete_induced_sampling(self, var1, var2, envelope_factor): pce = PolynomialChaosExpansion() pce.configure(pce_opts) - indices = compute_hyperbolic_indices(pce.num_vars(), degree, 1.0) + indices = compute_hyperbolic_indices(pce.nvars, degree, 1.0) pce.set_indices(indices) num_samples = int(3e4) @@ -144,7 +144,7 @@ def generate_proposal_samples(n): # pce.basis_matrix here. If use canonical_basis_matrix then # densities must be mapped to this space also which can be difficult samples2 = random_induced_measure_sampling( - num_samples, pce.num_vars(), pce.basis_matrix, density, + num_samples, pce.nvars, pce.basis_matrix, density, proposal_density, generate_proposal_samples, envelope_factor) def induced_density(x): diff --git a/pyapprox/tests/test_orthogonal_least_interpolation.py b/pyapprox/tests/test_orthogonal_least_interpolation.py index 55be0495..fc937cfb 100644 --- a/pyapprox/tests/test_orthogonal_least_interpolation.py +++ b/pyapprox/tests/test_orthogonal_least_interpolation.py @@ -132,7 +132,7 @@ def test_block_diagonal_matrix_pre_multiply(self): def get_tensor_product_points(level, var_trans, quad_type): abscissa_1d = [] - num_vars = var_trans.num_vars() + num_vars = var_trans.nvars if quad_type == 'CC': x, w = clenshaw_curtis_pts_wts_1D(level) elif quad_type == 'GH': diff --git a/pyapprox/tests/test_sensitivity_analysis.py b/pyapprox/tests/test_sensitivity_analysis.py index 82cfc013..a8ef6d88 100644 --- a/pyapprox/tests/test_sensitivity_analysis.py +++ b/pyapprox/tests/test_sensitivity_analysis.py @@ -274,7 +274,7 @@ def test_qmc_sobol_sensitivity_analysis_ishigami(self): benchmark = setup_benchmark("ishigami", a=7, b=0.1) nsamples = 10000 - nvars = benchmark.variable.num_vars() + nvars = benchmark.variable.nvars order = 3 interaction_terms = pya.compute_hyperbolic_indices(nvars, order) interaction_terms = interaction_terms[:, np.where( @@ -306,7 +306,7 @@ def test_repeat_qmc_sobol_sensitivity_analysis_ishigami(self): benchmark = setup_benchmark("ishigami", a=7, b=0.1) nsamples = 10000 - nvars = benchmark.variable.num_vars() + nvars = benchmark.variable.nvars order = 3 interaction_terms = pya.compute_hyperbolic_indices(nvars, order) interaction_terms = 
interaction_terms[:, np.where( @@ -346,7 +346,7 @@ def test_qmc_sobol_sensitivity_analysis_oakley(self): benchmark = setup_benchmark("oakley") nsamples = 100000 - nvars = benchmark.variable.num_vars() + nvars = benchmark.variable.nvars order = 1 interaction_terms = pya.compute_hyperbolic_indices(nvars, order) interaction_terms = interaction_terms[:, np.where( @@ -371,7 +371,7 @@ def test_sampling_based_sobol_indices_from_gaussian_process(self): from pyapprox.benchmarks.benchmarks import setup_benchmark from pyapprox.approximate import approximate benchmark = setup_benchmark("ishigami", a=7, b=0.1) - nvars = benchmark.variable.num_vars() + nvars = benchmark.variable.nvars # nsobol_samples and ntrain_samples effect assert tolerances ntrain_samples = 500 @@ -468,7 +468,7 @@ def test_analytic_sobol_indices_from_gaussian_process(self): from pyapprox.benchmarks.benchmarks import setup_benchmark from pyapprox.approximate import approximate benchmark = setup_benchmark("ishigami", a=7, b=0.1) - nvars = benchmark.variable.num_vars() + nvars = benchmark.variable.nvars ntrain_samples = 500 # train_samples = pya.generate_independent_random_samples( diff --git a/pyapprox/tests/test_sparse_grid.py b/pyapprox/tests/test_sparse_grid.py index fbbf2361..2332d065 100644 --- a/pyapprox/tests/test_sparse_grid.py +++ b/pyapprox/tests/test_sparse_grid.py @@ -1684,6 +1684,9 @@ def map_from_canonical_space(self, canonical_samples): return samples def num_vars(self): + import warnings + warnings.warn("Use of `num_vars()` will be deprecated. Access property `.nvars` instead", + PendingDeprecationWarning) return self.nvars @@ -1894,7 +1897,7 @@ def test_combination_sparse_grid_setup(self): variable = IndependentMultivariateRandomVariable( univariate_variables) var_trans = AffineRandomVariableTransformation(variable) - sparse_grid = CombinationSparseGrid(var_trans.num_vars()) + sparse_grid = CombinationSparseGrid(var_trans.nvars) quad_rules, growth_rules, unique_quadrule_indices, \ unique_max_level_1d = \ get_sparse_grid_univariate_leja_quadrature_rules_economical( @@ -1931,7 +1934,7 @@ def function(samples): variable = IndependentMultivariateRandomVariable( univariate_variables) var_trans = AffineRandomVariableTransformation(variable) - sparse_grid = CombinationSparseGrid(var_trans.num_vars()) + sparse_grid = CombinationSparseGrid(var_trans.nvars) admissibility_function = partial( max_level_admissibility_function, np.inf, [12]*num_vars, 100, 0, verbose=False) diff --git a/pyapprox/variable_transformations.py b/pyapprox/variable_transformations.py index f8f92235..078d2015 100644 --- a/pyapprox/variable_transformations.py +++ b/pyapprox/variable_transformations.py @@ -112,6 +112,9 @@ def map_to_canonical_space(self, user_samples): user_samples, self.user_ranges, self.canonical_ranges) def num_vars(self): + import warnings + warnings.warn("Use of `num_vars()` will be deprecated. 
Access property `.nvars` instead", + PendingDeprecationWarning) return self.nvars @@ -230,8 +233,8 @@ def map_derivatives_from_canonical_space(self, derivatives): Derivatives can also be (nvars, nsamples) - transpose of Jacobian - Here each sample is considered a different QoI """ - assert derivatives.shape[0] % self.num_vars() == 0 - num_samples = int(derivatives.shape[0]/self.num_vars()) + assert derivatives.shape[0] % self.nvars == 0 + num_samples = int(derivatives.shape[0]/self.nvars) mapped_derivatives = derivatives.copy() for ii in range(self.variable.nunique_vars): var_indices = self.variable.unique_variable_indices[ii] @@ -254,8 +257,8 @@ def map_derivatives_to_canonical_space(self, canonical_derivatives): Derivatives can also be (nvars, nsamples) - transpose of Jacobian - Here each sample is considered a different QoI """ - assert canonical_derivatives.shape[0] % self.num_vars() == 0 - num_samples = int(canonical_derivatives.shape[0]/self.num_vars()) + assert canonical_derivatives.shape[0] % self.nvars == 0 + num_samples = int(canonical_derivatives.shape[0]/self.nvars) derivatives = canonical_derivatives.copy() for ii in range(self.variable.nunique_vars): var_indices = self.variable.unique_variable_indices[ii] @@ -266,7 +269,14 @@ def map_derivatives_to_canonical_space(self, canonical_derivatives): return derivatives def num_vars(self): - return self.variable.num_vars() + import warnings + warnings.warn("Use of `num_vars()` will be deprecated. Access property `.nvars` instead", + PendingDeprecationWarning) + return self.variable.nvars + + @property + def nvars(self): + return self.variable.nvars def samples_of_bounded_variables_inside_domain(self, samples): for ii in range(self.variable.nunique_vars): @@ -282,7 +292,7 @@ def samples_of_bounded_variables_inside_domain(self, samples): return True def get_ranges(self): - ranges = np.empty((2*self.num_vars()), dtype=float) + ranges = np.empty((2*self.nvars), dtype=float) for ii in range(self.variable.nunique_vars): var = self.variable.unique_variables[ii] lb, ub = var.interval(1) @@ -305,7 +315,7 @@ def __init__(self, joint_density, num_vars, opts): self.tol = opts.get('tol', 1e-12) self.num_bins = opts.get('num_bins', 101) self.nvars = num_vars - self.canonical_variable_types = ['uniform']*self.num_vars() + self.canonical_variable_types = ['uniform']*self.nvars def map_from_canonical_space(self, canonical_samples): user_samples = inverse_rosenblatt_transformation( @@ -320,6 +330,9 @@ def map_to_canonical_space(self, user_samples): return canonical_samples def num_vars(self): + import warnings + warnings.warn("Use of `num_vars()` will be deprecated. Access property `.nvars` instead", + PendingDeprecationWarning) return self.nvars @@ -359,6 +372,9 @@ def map_to_canonical_space(self, user_samples): return canonical_samples def num_vars(self): + import warnings + warnings.warn("Use of `num_vars()` will be deprecated. Access property `.nvars` instead", + PendingDeprecationWarning) return self.nvars @@ -396,6 +412,9 @@ def map_to_canonical_space(self, user_samples): self.z_correlation_cholesky_factor) def num_vars(self): + import warnings + warnings.warn("Use of `num_vars()` will be deprecated. Access property `.nvars` instead", + PendingDeprecationWarning) return self.nvars @@ -427,4 +446,11 @@ def map_to_canonical_space(self, user_samples): return canonical_samples def num_vars(self): - return self.transformations[0].num_vars() + import warnings + warnings.warn("Use of `num_vars()` will be deprecated. 
Access property `.nvars` instead", + PendingDeprecationWarning) + return self.transformations[0].nvars + + @property + def nvars(self): + return self.transformations[0].nvars diff --git a/pyapprox/variables.py b/pyapprox/variables.py index 5fe7528b..d91ea17c 100644 --- a/pyapprox/variables.py +++ b/pyapprox/variables.py @@ -307,7 +307,7 @@ def get_statistics(self, function_name, **kwargs): stats_ii = np.atleast_1d(getattr(var, function_name)(**kwargs)) assert stats_ii.ndim == 1 if ii == 0: - stats = np.empty((self.num_vars(), stats_ii.shape[0])) + stats = np.empty((self.nvars, stats_ii.shape[0])) stats[indices] = stats_ii return stats @@ -320,12 +320,12 @@ def evaluate(self, function_name, x): stats_jj = np.atleast_1d(getattr(var, function_name)(x[jj, :])) assert stats_jj.ndim == 1 if stats is None: - stats = np.empty((self.num_vars(), stats_jj.shape[0])) + stats = np.empty((self.nvars, stats_jj.shape[0])) stats[jj] = stats_jj return stats def pdf(self, x, log=False): - assert x.shape[0] == self.num_vars() + assert x.shape[0] == self.nvars if log is False: marginal_vals = self.evaluate("pdf", x) else: @@ -335,9 +335,9 @@ def pdf(self, x, log=False): def __str__(self): variable_labels = self.variable_labels if variable_labels is None: - variable_labels = ["z%d" % ii for ii in range(self.num_vars())] + variable_labels = ["z%d" % ii for ii in range(self.nvars)] string = "I.I.D. Variable\n" - string += f"Number of variables: {self.num_vars()}\n" + string += f"Number of variables: {self.nvars}\n" string += "Unique variables and global id:\n" for ii in range(self.nunique_vars): var = self.unique_variables[ii] diff --git a/pyapprox/visualization.py b/pyapprox/visualization.py index 3aa08681..d7a74b81 100644 --- a/pyapprox/visualization.py +++ b/pyapprox/visualization.py @@ -483,7 +483,7 @@ def get_coefficients_for_plotting(pce, qoi_idx): indices = pce.indices.copy() assert coeff.shape[0] == indices.shape[1] - num_vars = pce.num_vars() + num_vars = pce.nvars degree = -1 indices_dict = dict() max_degree = indices.sum(axis=0).max() @@ -818,11 +818,11 @@ def plot_1d_cross_sections(fun, variable, nominal_sample=None, nominal_sample = variable.get_statistics("mean") if subplot_tuple is None: - nfig_rows, nfig_cols = 1, variable.num_vars() + nfig_rows, nfig_cols = 1, variable.nvars else: nfig_rows, nfig_cols = subplot_tuple - if nfig_rows*nfig_cols < variable.num_vars(): + if nfig_rows*nfig_cols < variable.nvars: raise ValueError("Number of subplots is insufficient") fig, axs = plt.subplots( @@ -834,7 +834,7 @@ def plot_1d_cross_sections(fun, variable, nominal_sample=None, plot_1d_cross_section( fun, var, ii, nominal_sample, nsamples_1d, axs[ii], qoi, plt_kwargs) - for ii in range(variable.num_vars(), nfig_rows*nfig_cols): + for ii in range(variable.nvars, nfig_rows*nfig_cols): axs[ii].axis("off") return fig, axs @@ -849,7 +849,7 @@ def plot_2d_cross_sections(fun, variable, nominal_sample=None, if variable_pairs is None: variable_pairs = np.array( - compute_anova_level_indices(variable.num_vars(), 2)) + compute_anova_level_indices(variable.nvars, 2)) # make first column values vary fastest so we plot lower triangular # matrix of subplots variable_pairs[:, 0], variable_pairs[:, 1] = \ @@ -859,7 +859,7 @@ def plot_2d_cross_sections(fun, variable, nominal_sample=None, raise ValueError("Variable pairs has the wrong shape") if subplot_tuple is None: - nfig_rows, nfig_cols = variable.num_vars(), variable.num_vars() + nfig_rows, nfig_cols = variable.nvars, variable.nvars else: nfig_rows, nfig_cols = 
diff --git a/pyapprox/visualization.py b/pyapprox/visualization.py
index 3aa08681..d7a74b81 100644
--- a/pyapprox/visualization.py
+++ b/pyapprox/visualization.py
@@ -483,7 +483,7 @@ def get_coefficients_for_plotting(pce, qoi_idx):
     indices = pce.indices.copy()
     assert coeff.shape[0] == indices.shape[1]
 
-    num_vars = pce.num_vars()
+    num_vars = pce.nvars
     degree = -1
     indices_dict = dict()
     max_degree = indices.sum(axis=0).max()
@@ -818,11 +818,11 @@ def plot_1d_cross_sections(fun, variable, nominal_sample=None,
         nominal_sample = variable.get_statistics("mean")
 
     if subplot_tuple is None:
-        nfig_rows, nfig_cols = 1, variable.num_vars()
+        nfig_rows, nfig_cols = 1, variable.nvars
     else:
         nfig_rows, nfig_cols = subplot_tuple
 
-    if nfig_rows*nfig_cols < variable.num_vars():
+    if nfig_rows*nfig_cols < variable.nvars:
         raise ValueError("Number of subplots is insufficient")
 
     fig, axs = plt.subplots(
@@ -834,7 +834,7 @@ def plot_1d_cross_sections(fun, variable, nominal_sample=None,
         plot_1d_cross_section(
             fun, var, ii, nominal_sample, nsamples_1d, axs[ii], qoi,
             plt_kwargs)
-    for ii in range(variable.num_vars(), nfig_rows*nfig_cols):
+    for ii in range(variable.nvars, nfig_rows*nfig_cols):
         axs[ii].axis("off")
     return fig, axs
 
@@ -849,7 +849,7 @@ def plot_2d_cross_sections(fun, variable, nominal_sample=None,
 
     if variable_pairs is None:
         variable_pairs = np.array(
-            compute_anova_level_indices(variable.num_vars(), 2))
+            compute_anova_level_indices(variable.nvars, 2))
         # make first column values vary fastest so we plot lower triangular
         # matrix of subplots
         variable_pairs[:, 0], variable_pairs[:, 1] = \
@@ -859,7 +859,7 @@ def plot_2d_cross_sections(fun, variable, nominal_sample=None,
         raise ValueError("Variable pairs has the wrong shape")
 
     if subplot_tuple is None:
-        nfig_rows, nfig_cols = variable.num_vars(), variable.num_vars()
+        nfig_rows, nfig_cols = variable.nvars, variable.nvars
     else:
         nfig_rows, nfig_cols = subplot_tuple
 
diff --git a/pyapprox_dev/pyapprox_dev/bayesian_inference/tests/test_markov_chain_monte_carlo.py b/pyapprox_dev/pyapprox_dev/bayesian_inference/tests/test_markov_chain_monte_carlo.py
index 10beea4a..cc7b0dcd 100644
--- a/pyapprox_dev/pyapprox_dev/bayesian_inference/tests/test_markov_chain_monte_carlo.py
+++ b/pyapprox_dev/pyapprox_dev/bayesian_inference/tests/test_markov_chain_monte_carlo.py
@@ -139,7 +139,7 @@ def unnormalized_posterior(x):
         # a time
         vals = np.exp(loglike.loglike(x))
         rvs = variables.all_variables()
-        for ii in range(variables.num_vars()):
+        for ii in range(variables.nvars):
             vals[:,0] *= rvs[ii].pdf(x[ii,:])
         return vals
 
@@ -148,7 +148,7 @@ def univariate_quadrature_rule(n):
         x*=2
         return x,w
     x,w = get_tensor_product_quadrature_rule(
-        100,variables.num_vars(),univariate_quadrature_rule)
+        100,variables.nvars,univariate_quadrature_rule)
     evidence = unnormalized_posterior(x)[:,0].dot(w)
     #print('evidence',evidence)
 
@@ -177,7 +177,7 @@ def univariate_quadrature_rule(n):
     print('MAP sample',map_sample)
    print('exact mean',exact_mean.squeeze())
     print('MCMC mean',samples.mean(axis=1))
-    assert np.allclose(map_sample,np.zeros((variables.num_vars(),1)))
+    assert np.allclose(map_sample,np.zeros((variables.nvars,1)))
     #tolerance 3e-2 can be exceeded for certain random runs
     assert np.allclose(
         exact_mean.squeeze(), samples.mean(axis=1),atol=3e-2)
diff --git a/pyapprox_dev/tutorials/plot_advection_diffusion_model.py b/pyapprox_dev/tutorials/plot_advection_diffusion_model.py
index 84ec2f7b..14ae3bf7 100644
--- a/pyapprox_dev/tutorials/plot_advection_diffusion_model.py
+++ b/pyapprox_dev/tutorials/plot_advection_diffusion_model.py
@@ -282,7 +282,7 @@ def generate_random_samples(m, n):
 model = benchmark.fun
 validation_levels = [5]*3
 data = error_vs_cost(
-    model, partial(generate_random_samples, benchmark.variable.num_vars()),
+    model, partial(generate_random_samples, benchmark.variable.nvars),
     validation_levels)
 plot_error_vs_cost(data, 'time')
 plt.show()
diff --git a/pyapprox_dev/tutorials/plot_bayesian_inference.py b/pyapprox_dev/tutorials/plot_bayesian_inference.py
index 339d5af6..789584da 100644
--- a/pyapprox_dev/tutorials/plot_bayesian_inference.py
+++ b/pyapprox_dev/tutorials/plot_bayesian_inference.py
@@ -251,7 +251,7 @@ def data_obs_limit_state(samples, vals, data_obs):
         new_joint_covariance)
     new_mean, new_cov = condition_normal_on_data(
         new_joint.mean, new_joint.covariance,
-        np.arange(new_prior.num_vars(),new_prior.num_vars()+data.num_vars()),
+        np.arange(new_prior.nvars,new_prior.nvars+data.nvars),
         data_obs)
     posteriors[ii] = pya.NormalDensity(new_mean,new_cov)
 
@@ -322,7 +322,7 @@ def data_obs_limit_state(samples, vals, data_obs):
 def unnormalized_posterior(x):
     vals = np.exp(loglike.loglike(x))
     rvs = variables.all_variables()
-    for ii in range(variables.num_vars()):
+    for ii in range(variables.nvars):
         vals[:,0] *= rvs[ii].pdf(x[ii,:])
     return vals
 
@@ -331,7 +331,7 @@ def univariate_quadrature_rule(n):
     x*=2
     return x,w
 x,w = pya.get_tensor_product_quadrature_rule(
-    100,variables.num_vars(),univariate_quadrature_rule)
+    100,variables.nvars,univariate_quadrature_rule)
 evidence = unnormalized_posterior(x)[:,0].dot(w)
 print('evidence',evidence)
 
diff --git a/tutorials/polynomial_chaos/plot_adaptive_leja_interpolation.py b/tutorials/polynomial_chaos/plot_adaptive_leja_interpolation.py
index d45f207c..d8d3c315 100644
--- a/tutorials/polynomial_chaos/plot_adaptive_leja_interpolation.py
+++ b/tutorials/polynomial_chaos/plot_adaptive_leja_interpolation.py
@@ -77,9 +77,9 @@ def callback(pce):
 max_num_samples = 200
 error_tol = 1e-10
 candidate_samples = -np.cos(
-    np.random.uniform(0, np.pi, (var_trans.num_vars(), int(1e4))))
+    np.random.uniform(0, np.pi, (var_trans.nvars, int(1e4))))
 pce = pya.AdaptiveLejaPCE(
-    var_trans.num_vars(), candidate_samples, factorization_type='fast')
+    var_trans.nvars, candidate_samples, factorization_type='fast')
 max_level = np.inf
 max_level_1d = [max_level]*(pce.num_vars)
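
The Bayesian-inference hunks above estimate the evidence by evaluating an unnormalized posterior at tensor-product quadrature points and taking the dot product with the weights. Below is a minimal self-contained sketch of that pattern in plain NumPy; the Gauss-Legendre rule and the Gaussian integrand are illustrative stand-ins for the tutorial's `univariate_quadrature_rule` and model, and `tensor_product_rule` is a hypothetical helper, not pyapprox's `get_tensor_product_quadrature_rule`.

    import numpy as np

    def univariate_quadrature_rule(n):
        # Gauss-Legendre on [-1, 1]; halve the weights so they integrate
        # against the uniform density on that interval.
        x, w = np.polynomial.legendre.leggauss(n)
        return x, w / 2

    def tensor_product_rule(n, nvars, rule):
        # Tensor the 1D points and weights over nvars dimensions.
        x1, w1 = rule(n)
        grids = np.meshgrid(*([x1] * nvars), indexing="ij")
        x = np.vstack([g.ravel() for g in grids])   # shape (nvars, n**nvars)
        w = np.ones(x.shape[1])
        for wd in np.meshgrid(*([w1] * nvars), indexing="ij"):
            w *= wd.ravel()
        return x, w

    def unnormalized_posterior(x):
        # Illustrative integrand: an unnormalized Gaussian.
        return np.exp(-0.5 * (x ** 2).sum(axis=0))

    x, w = tensor_product_rule(50, 2, univariate_quadrature_rule)
    evidence = unnormalized_posterior(x).dot(w)
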
From 988f9433a18d215371506b1e4aaa31ad6818c934 Mon Sep 17 00:00:00 2001
From: ConnectedSystems
Date: Sun, 29 Aug 2021 17:11:41 +1000
Subject: [PATCH 3/3] Add missing type conversion to address tests failing on
 Windows

---
 pyapprox/barycentric_interpolation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyapprox/barycentric_interpolation.py b/pyapprox/barycentric_interpolation.py
index bf449861..f22ecec4 100644
--- a/pyapprox/barycentric_interpolation.py
+++ b/pyapprox/barycentric_interpolation.py
@@ -184,7 +184,7 @@ def multivariate_hierarchical_barycentric_lagrange_interpolation(
 
     result = \
         multivariate_hierarchical_barycentric_lagrange_interpolation_pyx(
-            x, fn_vals, active_dims,
+            x, fn_vals, active_dims.astype(np.int64),
             active_abscissa_indices_1d.astype(np.int_),
             num_abscissa_1d.astype(np.int_),
             num_active_abscissa_1d.astype(np.int_),
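
PATCH 3/3 exists because NumPy's default integer type is platform dependent: `np.int_` maps to C `long`, which is 32 bits on Windows even in 64-bit builds, while it is 64 bits on most Linux and macOS builds. A Cython kernel whose buffer arguments are typed for 64-bit integers then rejects the Windows arrays with a dtype mismatch. A minimal sketch of the guard, assuming (as the hunk suggests) that the `_pyx` kernel expects 64-bit indices:

    import numpy as np

    active_dims = np.arange(3)              # int32 on Windows, int64 elsewhere
    active_dims64 = active_dims.astype(np.int64)
    assert active_dims64.dtype == np.int64  # platform independent from here on
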