Commit edd4e78

Fixes to allow non-lazy components.
1 parent dc4a5d9 commit edd4e78

File tree

1 file changed: +23 -15 lines changed

lib/iris/aux_factory.py

Lines changed: 23 additions & 15 deletions
@@ -11,7 +11,7 @@
 import dask.array as da
 import numpy as np
 
-from iris._lazy_data import concatenate
+from iris._lazy_data import concatenate, is_lazy_data
 from iris.common import CFVariableMixin, CoordMetadata, metadata_manager_factory
 import iris.coords
 from iris.warnings import IrisIgnoringBoundsWarning
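
For context, `is_lazy_data` is the Iris helper used below to tell whether a dependency is still a lazy dask array or an already-realised NumPy array. A minimal sketch of the distinction it tests (array shapes are illustrative, and this assumes an environment with Iris and dask installed):

import dask.array as da
import numpy as np

from iris._lazy_data import is_lazy_data

lazy = da.zeros((4, 6), chunks=(2, 3))  # still a deferred dask array
real = np.zeros((4, 6))                 # an already-computed numpy array

print(is_lazy_data(lazy))  # True
print(is_lazy_data(real))  # False
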
@@ -119,17 +119,6 @@ def _derive_array(self, *dep_arrays, **other_args):
         """
         result = self._calculate_array(*dep_arrays, **other_args)
 
-        # The dims of all the given components should be the same and, **presumably**,
-        # the same as the result ??
-        for i_dep, (dep, name) in enumerate(zip(dep_arrays, self.dependencies.keys())):
-            if dep.ndim != result.ndim:
-                msg = (
-                    f"Dependency #{i_dep}, '{name}' has ndims={dep.ndim}, "
-                    "not matching result {result.ndim!r}"
-                    " (shapes {dep.shape}/{result.shape})."
-                )
-                raise ValueError(msg)
-
         # See if we need to improve on the chunking of the result
         from iris._lazy_data import _optimum_chunksize
         adjusted_chunks = _optimum_chunksize(
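
`_optimum_chunksize` is a private Iris helper, so its full signature is not reproduced here; what the next hunk relies on is just the dask behaviour that an array exposes a `chunksize` and can be rechunked to a smaller one. A minimal sketch of that behaviour (sizes are illustrative):

import dask.array as da

arr = da.zeros((1000, 1000), chunks=(1000, 1000))
print(arr.chunksize)  # (1000, 1000): one single large chunk

# Rechunking splits the work into smaller pieces along the first axis.
print(arr.rechunk((250, 1000)).chunksize)  # (250, 1000)
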
@@ -138,26 +127,45 @@ def _derive_array(self, *dep_arrays, **other_args):
             dtype=result.dtype,
         )
 
-        if adjusted_chunks != result.chunksize:
+        # Does optimum_chunksize say we should have smaller chunks in some dimensions?
+        if np.any(adjusted_chunks < result.chunksize):
+            # co-broadcast all the deps to get same dimensions for each
+            dep_arrays = np.broadcast_arrays(*dep_arrays)
+
+            # The dims of all the given components should now be the same and, *presumably*,
+            # the same as the result ??
+            for i_dep, (dep, name) in enumerate(zip(dep_arrays, self.dependencies.keys())):
+                if dep.ndim != result.ndim:
+                    msg = (
+                        f"Dependency #{i_dep}, '{name}' has ndims={dep.ndim}, "
+                        f"not matching result {result.ndim!r}"
+                        f" : respective shapes {dep.shape}, {result.shape}."
+                    )
+                    raise ValueError(msg)
+
             # Re-do the result calculation, re-chunking the inputs along dimensions
             # which it is suggested to reduce.
             # First make a (writable) copy of the inputs.....
             new_deps = []
             for i_dep, dep in enumerate(dep_arrays):
                 # Reduce each dependency chunksize to the result chunksize if smaller.
-                dep_chunks = dep.chunksize
+                dep_chunks = dep.chunksize if is_lazy_data(dep) else dep.shape
                 new_chunks = tuple([
                     min(dep_chunk, adj_chunk)
                     for dep_chunk, adj_chunk in zip(dep_chunks, adjusted_chunks)
                 ])
                 # If the dep chunksize was reduced, replace with a rechunked version.
                 if new_chunks != dep_chunks:
+                    if not is_lazy_data(dep):
+                        # I guess this is possible ?
+                        # TODO: needs a test
+                        dep = da.from_array(dep)
                     dep = dep.rechunk(new_chunks)
                 new_deps.append(dep)
 
             # Finally, re-do the calculation, which hopefully results in a better
             # overall chunking for the result
-            result = self._calculate_array(*new_deps)
+            result = self._calculate_array(*new_deps, **other_args)
 
         return result
 