@@ -11,7 +11,7 @@
 import dask.array as da
 import numpy as np
 
-from iris._lazy_data import concatenate
+from iris._lazy_data import concatenate, is_lazy_data
 from iris.common import CFVariableMixin, CoordMetadata, metadata_manager_factory
 import iris.coords
 from iris.warnings import IrisIgnoringBoundsWarning
@@ -119,17 +119,6 @@ def _derive_array(self, *dep_arrays, **other_args):
         """
         result = self._calculate_array(*dep_arrays, **other_args)
 
-        # The dims of all the given components should be the same and, **presumably**,
-        # the same as the result ??
-        for i_dep, (dep, name) in enumerate(zip(dep_arrays, self.dependencies.keys())):
-            if dep.ndim != result.ndim:
-                msg = (
-                    f"Dependency #{i_dep}, '{name}' has ndims={dep.ndim}, "
-                    "not matching result {result.ndim!r}"
-                    " (shapes {dep.shape}/{result.shape})."
-                )
-                raise ValueError(msg)
-
         # See if we need to improve on the chunking of the result
         from iris._lazy_data import _optimum_chunksize
         adjusted_chunks = _optimum_chunksize(
@@ -138,26 +127,45 @@ def _derive_array(self, *dep_arrays, **other_args):
             dtype=result.dtype,
         )
 
-        if adjusted_chunks != result.chunksize:
+        # Does optimum_chunksize say we should have smaller chunks in some dimensions?
+        if np.any(adjusted_chunks < result.chunksize):
+            # co-broadcast all the deps to get same dimensions for each
+            dep_arrays = np.broadcast_arrays(*dep_arrays)
+
+            # The dims of all the given components should now be the same and, *presumably*,
+            # the same as the result ??
+            for i_dep, (dep, name) in enumerate(zip(dep_arrays, self.dependencies.keys())):
+                if dep.ndim != result.ndim:
+                    msg = (
+                        f"Dependency #{i_dep}, '{name}' has ndims={dep.ndim}, "
+                        f"not matching result {result.ndim!r}"
+                        f" : respective shapes {dep.shape}, {result.shape}."
+                    )
+                    raise ValueError(msg)
+
             # Re-do the result calculation, re-chunking the inputs along dimensions
             # which it is suggested to reduce.
             # First make a (writable) copy of the inputs.....
             new_deps = []
             for i_dep, dep in enumerate(dep_arrays):
                 # Reduce each dependency chunksize to the result chunksize if smaller.
-                dep_chunks = dep.chunksize
+                dep_chunks = dep.chunksize if is_lazy_data(dep) else dep.shape
                 new_chunks = tuple([
                     min(dep_chunk, adj_chunk)
                     for dep_chunk, adj_chunk in zip(dep_chunks, adjusted_chunks)
                 ])
                 # If the dep chunksize was reduced, replace with a rechunked version.
                 if new_chunks != dep_chunks:
+                    if not is_lazy_data(dep):
+                        # I guess this is possible ?
+                        # TODO: needs a test
+                        dep = da.from_array(dep)
                     dep = dep.rechunk(new_chunks)
                 new_deps.append(dep)
 
             # Finally, re-do the calculation, which hopefully results in a better
             # overall chunking for the result
-            result = self._calculate_array(*new_deps)
+            result = self._calculate_array(*new_deps, **other_args)
 
         return result
 
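To make the rechunking step easier to follow outside the diff, here is a minimal standalone sketch of the same clamping logic, assuming the dependencies are a mix of dask and NumPy arrays. It is not the Iris implementation itself: rechunk_deps and suggested_chunks are illustrative names (suggested_chunks stands in for the _optimum_chunksize() output), and the isinstance(dep, da.Array) test is a local stand-in for iris._lazy_data.is_lazy_data.

import dask.array as da
import numpy as np


def rechunk_deps(dep_arrays, suggested_chunks):
    """Clamp each dependency's chunks to suggested_chunks (illustrative sketch only)."""
    # Co-broadcast so every dependency has the same number of dimensions.
    dep_arrays = np.broadcast_arrays(*dep_arrays)
    new_deps = []
    for dep in dep_arrays:
        # Stand-in for iris._lazy_data.is_lazy_data(dep).
        is_lazy = isinstance(dep, da.Array)
        dep_chunks = dep.chunksize if is_lazy else dep.shape
        new_chunks = tuple(
            min(dep_chunk, adj_chunk)
            for dep_chunk, adj_chunk in zip(dep_chunks, suggested_chunks)
        )
        if new_chunks != dep_chunks:
            if not is_lazy:
                # A real (NumPy) dependency must become a dask array before rechunking.
                dep = da.from_array(dep)
            dep = dep.rechunk(new_chunks)
        new_deps.append(dep)
    return new_deps


# Example: a large lazy dependency and a small real one, clamped to 100x100 chunks.
lazy_dep = da.zeros((400, 300), chunks=(400, 300))
real_dep = np.ones((1, 300))
for out in rechunk_deps([lazy_dep, real_dep], (100, 100)):
    print(type(out).__name__, getattr(out, "chunksize", out.shape))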