Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
109 commits
Select commit Hold shift + click to select a range
c59a606
refac: rename face_centered_vals -> cell_centered
rouson Oct 3, 2025
44dff9d
fix(gfortran): work around gfortran issues
rouson Oct 3, 2025
d4b6c1e
test(gradient): add 2nd line differentiation case
rouson Oct 5, 2025
af2958b
fix(matvec): use size() to set loop limits
rouson Oct 7, 2025
fda83f6
refac(test): mv code, rename function
rouson Oct 7, 2025
7d176f5
fix(grid): rm extra multiplicative factor of dx
rouson Oct 7, 2025
f19067f
test(grad): add passing test differentiating line
rouson Oct 7, 2025
4947425
refac(cell_centers_ex): domain(:) -> x_{min,max}
rouson Oct 7, 2025
02a4f88
feat(cell_centers_ex): add cells_ component
rouson Oct 7, 2025
e8f06a7
refac(cell_centers_extended): rm grid_ component
rouson Oct 7, 2025
bc39e5f
test(gradient): unit test of d(parabola)/dx passes
rouson Oct 8, 2025
1be8055
refac(initializers): rm abstract initializer type
rouson Oct 8, 2025
c95d136
fix(cell_centers_extended_s): rm dead code
rouson Oct 8, 2025
6903274
refac(gradient_t): mv to separate module/submodule
rouson Oct 9, 2025
2d6788d
refac(cell_centers_extended_t): rename scalar_1D_t
rouson Oct 9, 2025
5444cb8
refac(gradient_{t,m,s}):rename gradient_1D_{t,m,s}
rouson Oct 9, 2025
22208bc
refac(gradient_operator):name gradient_operator_1D
rouson Oct 9, 2025
a66b974
refac(mimetic_matrix}: append "_1D"
rouson Oct 9, 2025
d4e5174
fix(scalar_1D_s): import operators
rouson Oct 9, 2025
665662f
fix(mimetic_matrix_s): count upper/lower rows
rouson Oct 9, 2025
6f89cbf
refac(mimetic_matrix_s): simpler loop bound
rouson Oct 9, 2025
5e4c16a
fix(4th-order): flip some coefficient signs
rouson Oct 9, 2025
1e6080e
feat(mimetic_matrix_1D): add file_t constructor
rouson Oct 10, 2025
2cfe3c3
fix(mimetic_matrix_t): reshape upper block (A)
rouson Oct 10, 2025
8be0932
refac(vector): rename matrix-vector RHS scalar_1D
rouson Oct 10, 2025
b4a675e
doc(gradient_operator_1D): clarify statement label
rouson Oct 10, 2025
e53bf8d
test(order): 2nd-/4th-order accuracy tests pass
rouson Oct 10, 2025
45359f6
build(gfortran): work around concurrent type-spec
rouson Oct 11, 2025
80f349a
build(gfortran-14): add locality specifier macro
rouson Oct 12, 2025
c77984b
build(gfortran): use GCC ver to define local spec
rouson Oct 12, 2025
fae4b50
test(CI): build/test gradient-operator branch
rouson Oct 13, 2025
b08928d
test(CI): reduce test matrix
rouson Oct 13, 2025
c3a0bba
refac(fortran):combine scalar_1D/vector_1D modules
rouson Oct 16, 2025
a2691b7
refac(tensors_1D_t): build class hierarchy
rouson Oct 16, 2025
62a15b3
refactor(grid_s): gather grid funcs in new submod
rouson Nov 21, 2025
cfcb07b
refactor: rename procedures to facilitate disambig
rouson Nov 21, 2025
f119eda
refactor(M): lift matrix block to module for reuse
rouson Nov 21, 2025
7df5dab
chore: blank-space edits
rouson Nov 21, 2025
eb4c4e9
feat(vector_1D_t): type-bound .div. operator
rouson Nov 22, 2025
53edc44
test(julienne): update dependency version to 3.3.0
rouson Nov 23, 2025
7ba8ce4
chore(grad test): rm unused operator
rouson Nov 23, 2025
cc9b884
test(divergence): 1st passing unit test
rouson Nov 23, 2025
2473fe1
refactor(grid_s): distrib funcs to scalar/vector
rouson Nov 23, 2025
b6b060f
WIP: print diagnostics in divergence test
rouson Nov 23, 2025
90cd0c1
fix(divergence_matrix_1D): delete rows of zeros
rouson Nov 23, 2025
808c31c
refactor(divergence_{,operator_}1D_s}): combine
rouson Nov 23, 2025
64c4f44
refactor(gradient_{,operator_}1D_s}): combine
rouson Nov 23, 2025
daae3a9
refactor(tensor_1D_t): define scalar/vector parent
rouson Nov 23, 2025
8ba2654
refactor(tensor): mk nonabstract, construct parent
rouson Nov 24, 2025
1bc8177
fix(.div.): 2nd/4th-order divergences
rouson Nov 24, 2025
46ed3d8
test(divergence): replace decl/def with associate
rouson Nov 24, 2025
bdd1793
refactor(scalar,vector): uniform nomenclature
rouson Nov 25, 2025
f2d0a96
test(.div.):order of accuracy for 2nd-order method
rouson Nov 25, 2025
ae4e30f
test(.div.):order of accuracy for 4th-order method
rouson Nov 25, 2025
0a034a0
chore: blank-space edits, new associate, renamings
rouson Nov 26, 2025
d482a7d
fix(.div.): locate scalars only at cell centers
rouson Nov 26, 2025
f8f18dd
chore(grad op test): blank-space edits
rouson Nov 26, 2025
d777dd8
test(grad):uniformly tighter convergence criterion
rouson Nov 26, 2025
1bc2f54
chore: more blank-space ed, associate, renam/reorg
rouson Nov 26, 2025
91b4f26
chore(divergence test):rm type not used explicitly
rouson Nov 26, 2025
a12c4ac
refactor: rm unnecessary gradient_1D_t child type
rouson Nov 27, 2025
cc66d0c
refactor: rm unneeded divergence_1D_t child type
rouson Nov 27, 2025
62e3bab
feat(.laplacian.): add & test for scalar operands
rouson Nov 27, 2025
b5c4e25
test(laplacian): conditionally write gnuplot file
rouson Nov 28, 2025
a4e3c78
fix(laplacian): adjust tolerance, test domain
rouson Nov 29, 2025
1e3ae67
refactor(tensor): mk separate scalar,vector module
rouson Nov 29, 2025
c23356d
refactor(tensor): separate mimetic_matrix module
rouson Nov 29, 2025
aa2f75b
refactor(tensors_1D_m): rename module
rouson Nov 30, 2025
8620404
refactor(mimetic_matrix): mk operators child types
rouson Nov 30, 2025
86b09a0
refactor(matvec): improve type safety
rouson Nov 30, 2025
5bbf29a
refactor: renamings, rm unused vars, add comments
rouson Nov 30, 2025
3e64951
refactor: recombine tensor, scalar, vector modules
rouson Nov 30, 2025
d011b55
refactor(matvec): mk type-bound
rouson Nov 30, 2025
091f9b9
refactor(mimetic ops): associate & blank-space eds
rouson Nov 30, 2025
840c98d
chore: work around gfortran bugs
rouson Nov 30, 2025
9fa8bb9
feat(example): compute Laplacian = div grad
rouson Nov 30, 2025
6cc774e
fix(scalar): constructor samples at cell centers
rouson Nov 30, 2025
b7c2d1f
fix(divergence,laplacian): def separate types
rouson Nov 30, 2025
641b4b7
fix(example): correct column headings
rouson Nov 30, 2025
ed4575a
test(laplacian): use {laplacian,divergence}_1D_t
rouson Dec 1, 2025
ca1a380
fix(example): gfortran `associate` bugs workaround
rouson Dec 1, 2025
61a4f8d
test: work around gfortran bugs
rouson Dec 1, 2025
509fb70
chore(gfortran): less duplication, skip 2 fails
rouson Dec 1, 2025
384e170
refactor(example): redistrib macros, reform output
rouson Dec 2, 2025
f755a24
feat(assembly): divergence, gradient matrices
rouson Dec 3, 2025
a0f81e8
fix(divergence): store zero rows
rouson Dec 4, 2025
522f604
test(laplacian): convergence rate checks pass
rouson Dec 4, 2025
9ec73d6
fix(example): div-grad example works for LLVM/GCC
rouson Dec 4, 2025
acda925
Merge branch 'laplacian' into dev/fortran
rouson Dec 5, 2025
0667615
fix(div): adjust constraint, unit vector dimension
rouson Dec 5, 2025
fe2a649
fix(div): keep all of D
rouson Dec 5, 2025
5f10329
feat(print-assembled): add usage output
rouson Dec 5, 2025
42bc326
feat(example): add command-line flags
rouson Dec 5, 2025
73522d6
refactor(A,A'): rm top/bottom rows of zeros
rouson Dec 5, 2025
bd92c63
fix(divergence): rm top/bottom after mat-vec prod
rouson Dec 5, 2025
6529ecf
fix(laplacian test): fix array dimensions
rouson Dec 5, 2025
16f6dae
test(div): assert divergence operator mat-vec len
rouson Dec 6, 2025
7b02111
test(laplacian): sep boundary/internal convergence
rouson Dec 6, 2025
ed0a588
test(divergence): blank-space edit
rouson Dec 6, 2025
8198d55
fix(div,grad): work around nag compiler issue
rouson Dec 6, 2025
f5be3e4
feat(vector,divergence): component constructors
rouson Dec 6, 2025
1ad8037
chore({gradient,scalar}_1D): rm unused files
rouson Dec 6, 2025
d5b2ad1
doc(UML): Fortran class diagram
rouson Dec 4, 2025
118e6d3
doc(UML): add operators
rouson Dec 4, 2025
2930a74
fix gfortran builds
rouson Dec 6, 2025
cf33717
test(CI): loosen 2 tolerances|run ifx single-image
rouson Dec 7, 2025
b1d224b
build(gfortran-{13,14} on ubuntu): workaround
rouson Dec 7, 2025
91380aa
build(gfortran-{13,14} on ubuntu): workaround
rouson Dec 7, 2025
ed154a9
build(gfortran-{13,14} on ubuntu): workaround
rouson Dec 7, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -152,7 +152,7 @@ jobs:
echo "FPM_FC=flang-new" >> "$GITHUB_ENV" ; \
elif [[ "$FC" =~ "ifx" ]] ; then \
echo "FPM_FC=ifx" >> "$GITHUB_ENV" ; \
echo "FFLAGS=-fpp -coarray $FFLAGS" >> "$GITHUB_ENV" ; \
echo "FFLAGS=-fpp -coarray -coarray-num-images=1 $FFLAGS" >> "$GITHUB_ENV" ; \
: ls -al /opt/intel/oneapi/compiler/2025.*/bin/ ; \
if type -p icpx ; then \
echo "FPM_CC=icx" >> "$GITHUB_ENV" ; \
Expand Down
51 changes: 51 additions & 0 deletions doc/fortran-classes.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
MOLE Fortran Class Diagram
--------------------------

```mermaid

%%{init: { 'theme':'neo', "class" : {"hideEmptyMembersBox": true} } }%%

classDiagram

class tensor_1D_t
class scalar_1D_t
class vector_1D_t
class divergence_1D_t
class laplacian_1D_t
class gradient_operator_1D_t
class divergence_operator_1D_t
class mimetic_matrix_1D_t

tensor_1D_t <|-- scalar_1D_t : is a
tensor_1D_t <|-- vector_1D_t : is a
tensor_1D_t <|-- divergence_1D_t : is a
divergence_1D_t <|-- laplacian_1D_t : is a
mimetic_matrix_1D_t <|-- gradient_operator_1D_t : is a
mimetic_matrix_1D_t <|-- divergence_operator_1D_t : is a

class scalar_1D_t{
- gradient_operator_1D_ : gradient_operator_1D_t
+ operator(.grad.) vector_1D_t
+ operator(.laplacian.) scalar_1D_t
}

class vector_1D_t{
- divergence_operator_1D_ : divergence_operator_1D_t
+ operator(.div.) divergence_1D_t
}

class mimetic_matrix_1D_t{
- upper_ : double precision
- inner_ : double precision
- lower_ : double precision
}

class gradient_operator_1D_t{
+ operator(.x.) double precision[]
+ assemble() double precision[] "2D array"
}

class divergence_operator_1D_t{
+ operator(.x.) double precision[]
+ assemble() double precision[] "2D array"
}
133 changes: 133 additions & 0 deletions example/div-grad-laplacian-1D.F90
Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
module functions_m
  !! Analytical test functions: a cubic polynomial together with its exact
  !! first and second derivatives, for checking mimetic-operator accuracy.
  implicit none

contains

  pure function f(x) result(y)
    !! Sample f(x) = x**3/6 + x**2/2 + 1 at every point of the array x.
    double precision, intent(in) :: x(:)
    double precision, allocatable :: y(:)

    y = (x**3)/6 + (x**2)/2 + 1
  end function f

  elemental function df_dx(x) result(dy_dx)
    !! Exact first derivative of f: x**2/2 + x.
    double precision, intent(in) :: x
    double precision :: dy_dx

    dy_dx = (x**2)/2 + x
  end function df_dx

  elemental function d2f_dx2(x) result(d2y_dx2)
    !! Exact second derivative of f: x + 1.
    double precision, intent(in) :: x
    double precision :: d2y_dx2

    d2y_dx2 = x + 1
  end function d2f_dx2

end module functions_m

program div_grad_laplacian_1D
!! Tabulate 2nd- and 4th-order mimetic approximations of f, .grad. f, and
!! .laplacian. f for the cubic defined in functions_m, alongside the exact
!! analytical values, on a 10-cell grid spanning [0, 20].  Two variants of
!! output() follow: the default uses nested `associate` blocks; the gfortran
!! variant uses explicit declarations to work around gfortran `associate` bugs.
use functions_m
use julienne_m, only : file_t, string_t, operator(.separatedBy.)
use mole_m, only : scalar_1D_t, scalar_1D_initializer_i
#ifdef __GFORTRAN__
! The gfortran variant of output() declares result variables explicitly,
! so it also needs the result types.
use mole_m, only : vector_1D_t, laplacian_1D_t
#endif
implicit none

! Initializer handed to the scalar_1D_t constructor so it can sample f.
procedure(scalar_1D_initializer_i), pointer :: scalar_1D_initializer => f

print *,new_line('')
print *," 2nd-order approximations"
print *," ========================"

call output(order=2)

print *,new_line('')
print *," 4th-order approximations"
print *," ========================"

call output(order=4)

contains

#ifndef __GFORTRAN__

subroutine output(order)
!! Construct the scalar field and its mimetic gradient and Laplacian at the
!! requested order of accuracy, then print an expected-vs-actual table for each.
integer, intent(in) :: order

associate( s => scalar_1D_t(scalar_1D_initializer, order=order, cells=10, x_min=0D0, x_max=20D0))
associate( grad_s => .grad. s &
,laplacian_s => .laplacian. s)
! Each tensor reports its own grid: the gradient's sample locations need not
! coincide with the scalar's cell centers.
associate( s_grid => s%grid() &
,grad_s_grid => grad_s%grid() &
,laplacian_s_grid => laplacian_s%grid())
associate( s_table => tabulate( &
string_t([character(len=18)::"x", "f(x) exp" , "f(x) act" ]) &
,s_grid, f(s_grid), s%values() &
) &
,grad_s_table => tabulate( &
string_t([character(len=18)::"x", ".grad. f exp" , ".grad. f act" ]) &
,grad_s_grid, df_dx(grad_s_grid), grad_s%values() &
) &
,laplacian_s_table => tabulate( &
string_t([character(len=18)::"x", ".laplacian. f exp", ".laplacian. f act"]) &
,laplacian_s_grid, d2f_dx2(laplacian_s_grid), laplacian_s%values()) &
)
call s_table%write_lines()
call grad_s_table%write_lines()
call laplacian_s_table%write_lines()
end associate
end associate
end associate
end associate
end subroutine

#else

! gfortran work-around: same behavior as the associate-based variant above,
! expressed with explicit declarations and assignments.
! NOTE(review): headings here end in "." (e.g. "f(x) exp.") while the
! non-gfortran branch omits the period -- confirm whether that is intended.
subroutine output(order)
integer, intent(in) :: order

type(scalar_1D_t) s
type(vector_1D_t) grad_s
type(laplacian_1D_t) laplacian_s
type(file_t) s_table, grad_s_table, laplacian_s_table
double precision, allocatable,dimension(:) :: s_grid, grad_s_grid, laplacian_s_grid

s = scalar_1D_t(scalar_1D_initializer, order=order, cells=10, x_min=0D0, x_max=20D0)
grad_s = .grad. s
laplacian_s = .laplacian. s

s_grid = s%grid()
grad_s_grid = grad_s%grid()
laplacian_s_grid = laplacian_s%grid()

s_table = tabulate( &
string_t([character(len=18)::"x", "f(x) exp." , "f(x) act." ]) &
,s_grid, f(s_grid), s%values() &
)
grad_s_table = tabulate( &
string_t([character(len=18)::"x", ".grad. f exp." , ".grad. f act." ]) &
,grad_s_grid, df_dx(grad_s_grid), grad_s%values() &
)
laplacian_s_table = tabulate( &
string_t([character(len=18)::"x", ".laplacian. f exp.", ".laplacian. f act."]) &
,laplacian_s_grid, d2f_dx2(laplacian_s_grid), laplacian_s%values() &
)
call s_table%write_lines()
call grad_s_table%write_lines()
call laplacian_s_table%write_lines()
end subroutine

#endif

pure function tabulate(headings, abscissa, expected, actual) result(file)
!! Build a file_t holding a blank line, the space-separated headings, a rule,
!! and one row per abscissa pairing the expected and actual values.
double precision, intent(in), dimension(:) :: abscissa, expected, actual
type(string_t), intent(in) :: headings(:)
type(file_t) file
integer line

file = file_t([ &
string_t("") &
,headings .separatedBy. " " &
,string_t("----------------------------------------------------------") &
,[( string_t(abscissa(line)) // " " // string_t(expected(line)) // " " // string_t(actual(line)), line = 1, size(abscissa))] &
])
end function

end program
78 changes: 78 additions & 0 deletions example/print-assembled-1D-operators.f90
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
program print_assembled_1D_operators
  !! Print the fully assembled mimetic 1D gradient and divergence operator
  !! matrices, including the zero elements, in CSV form for the 2nd- and
  !! 4th-order methods.
  use julienne_m, only : operator(.csv.), string_t, command_line_t
  use mimetic_operators_1D_m, only : gradient_operator_1D_t, divergence_operator_1D_t
  implicit none

  type(command_line_t) command_line

  command_line_settings: &
  associate( &
    gradient => command_line%argument_present(["--grad" ]) &
    ,divergence => command_line%argument_present(["--div" ]) &
    ,order => command_line%flag_value("--order") &
  )

    if (command_line%argument_present([character(len=len("--help")) :: ("--help"), "-h"])) then
      stop new_line('') // new_line('') &
        // 'Usage:' // new_line('') &
        // ' fpm run \' // new_line('') &
        // ' --example print-assembled-1D-operators \' // new_line('') &
        // ' --compiler flang-new \' // new_line('') &
        // ' --flag "-O3" \' // new_line('') &
        // ' -- [--help|-h] | [--grad] [--div] [--order <integer>]' // new_line('') // new_line('') &
        // 'where square brackets indicate optional arguments and angular brackets indicate user input values.' // new_line('')
    end if

    default_usage: &
    associate(print_all => .not. any([gradient, divergence, len(order)/=0]))

      ! With no selection arguments, print every operator at both orders; otherwise
      ! print only the requested operator(s), at the requested order if one was given.
      if (print_all .or. (gradient .and. len(order)==0) .or. (gradient .and. order=="2")) call print_gradient_operator( k=2, dx=1D0, m=5)
      if (print_all .or. (divergence .and. len(order)==0) .or. (divergence .and. order=="2")) call print_divergence_operator(k=2, dx=1D0, m=5)
      if (print_all .or. (gradient .and. len(order)==0) .or. (gradient .and. order=="4")) call print_gradient_operator( k=4, dx=1D0, m=9)
      if (print_all .or. (divergence .and. len(order)==0) .or. (divergence .and. order=="4")) call print_divergence_operator(k=4, dx=1D0, m=9)

    end associate default_usage
  end associate command_line_settings

contains

  subroutine print_gradient_operator(k, dx, m)
    !! Assemble and print the order-k gradient operator on m cells of width dx,
    !! one comma-separated matrix row per output line.
    integer, intent(in) :: k, m
    double precision, intent(in) :: dx
    integer row ! loop index declared locally rather than at program scope

    print *, new_line(""), "Gradient operator: order = ", k, " | cells = ", m, " | dx = ", dx

    associate(grad_op => gradient_operator_1D_t(k, dx, cells=m))
      associate(G => grad_op%assemble())
        do row = 1, size(G,1)
          associate(csv_row => .csv. string_t(G(row,:)))
            print '(a)', csv_row%string()
          end associate
        end do
      end associate
    end associate

  end subroutine

  subroutine print_divergence_operator(k, dx, m)
    !! Assemble and print the order-k divergence operator on m cells of width dx,
    !! one comma-separated matrix row per output line.
    integer, intent(in) :: k, m
    double precision, intent(in) :: dx
    integer row ! loop index declared locally rather than at program scope

    print *, new_line(""), "Divergence operator: order = ", k, " | cells = ", m, " | dx = ", dx

    associate(div_op => divergence_operator_1D_t(k, dx, cells=m))
      associate(D => div_op%assemble())
        do row = 1, size(D,1)
          associate(csv_row => .csv. string_t(D(row,:)))
            print '(a)', csv_row%string()
          end associate
        end do
      end associate
    end associate

  end subroutine

end program
2 changes: 1 addition & 1 deletion fpm.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ name = "MOLE"
armadillo-code = {git = "https://gitlab.com/rouson/armadillo-code.git", tag = "fpm"}

[dev-dependencies]
julienne = {git = "https://github.com/berkeleylab/julienne.git", tag = "3.1.5"}
julienne = {git = "https://github.com/berkeleylab/julienne.git", tag = "3.3.0"}

[install]
library = true
33 changes: 33 additions & 0 deletions src/fortran/divergence_1D_s.F90
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
submodule(tensors_1D_m) divergence_1D_s
  !! Implement the divergence_1D_t procedures: construction from a tensor_1D_t
  !! parent and access to the cell-centered values and grid locations.
  implicit none

contains

#ifdef __GFORTRAN__

  ! gfortran work-around: a submodule-local copy of this helper.
  ! NOTE(review): for other compilers the reference below presumably resolves
  ! to a definition elsewhere in the project -- verify.
  pure function cell_center_locations(x_min, x_max, cells) result(x)
    !! Return the abscissas of the `cells` cell centers of a uniform grid on [x_min, x_max].
    double precision, intent(in) :: x_min, x_max
    integer, intent(in) :: cells
    double precision, allocatable :: x(:)
    integer cell

    associate(dx => (x_max - x_min)/cells)
      ! The first center lies dx/2 inside the domain; the rest follow at spacing dx.
      ! dx/2 (integer divisor) keeps the arithmetic uniformly double precision,
      ! avoiding the mixed-kind default-real literal `2.`; the value is identical.
      x = x_min + dx/2 + [((cell-1)*dx, cell = 1, cells)]
    end associate
  end function

#endif

  module procedure construct_from_tensor
    ! Copy the parent component; divergence_1D_t adds no components here.
    divergence_1D%tensor_1D_t = tensor_1D
  end procedure

  module procedure divergence_1D_values
    ! Divergence results are stored at cell centers.
    cell_centered_values = self%values_
  end procedure

  module procedure divergence_1D_grid
    cell_centers = cell_center_locations(self%x_min_, self%x_max_, self%cells_)
  end procedure

end submodule divergence_1D_s
Loading