Skip to content

Commit 5862fb0

Browse files
committed
Prefer multiprocessing.cpu_count
This takes into account thread affinity, at least on Python 3.13 and above.
1 parent 0256a8d commit 5862fb0

File tree

6 files changed

+15
-10
lines changed

6 files changed

+15
-10
lines changed

test/common.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1844,7 +1844,7 @@ def get_library(self, name, generated_libs, configure=['sh', './configure'], #
18441844
if env_init is None:
18451845
env_init = {}
18461846
if make_args is None:
1847-
make_args = ['-j', str(shared.get_num_cores())]
1847+
make_args = ['-j', str(utils.get_num_cores())]
18481848

18491849
build_dir = self.get_build_dir()
18501850

test/parallel_testsuite.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -431,4 +431,4 @@ def __init__(self, co):
431431
def num_cores():
432432
if NUM_CORES:
433433
return int(NUM_CORES)
434-
return multiprocessing.cpu_count()
434+
return utils.get_num_cores()

tools/js_optimizer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -244,7 +244,7 @@ def check_symbol_mapping(p):
244244
# if we are making source maps, we want our debug numbering to start from the
245245
# top of the file, so avoid breaking the JS into chunks
246246

247-
intended_num_chunks = round(shared.get_num_cores() * NUM_CHUNKS_PER_CORE)
247+
intended_num_chunks = round(utils.get_num_cores() * NUM_CHUNKS_PER_CORE)
248248
chunk_size = min(MAX_CHUNK_SIZE, max(MIN_CHUNK_SIZE, total_size / intended_num_chunks))
249249
chunks = chunkify(funcs, chunk_size)
250250

tools/shared.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -115,10 +115,6 @@ def run_process(cmd, check=True, input=None, *args, **kw):
115115
return ret
116116

117117

118-
def get_num_cores():
119-
return int(os.environ.get('EMCC_CORES', os.cpu_count()))
120-
121-
122118
def returncode_to_str(code):
123119
assert code != 0
124120
if code < 0:
@@ -180,7 +176,7 @@ def get_finished_process():
180176
except subprocess.TimeoutExpired:
181177
pass
182178

183-
num_parallel_processes = get_num_cores()
179+
num_parallel_processes = utils.get_num_cores()
184180
temp_files = get_temp_files()
185181
i = 0
186182
num_completed = 0

tools/system_libs.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -159,7 +159,7 @@ def get_top_level_ninja_file():
159159

160160

161161
def run_ninja(build_dir):
162-
cmd = ['ninja', '-C', build_dir, f'-j{shared.get_num_cores()}']
162+
cmd = ['ninja', '-C', build_dir, f'-j{utils.get_num_cores()}']
163163
if shared.PRINT_SUBPROCS:
164164
cmd.append('-v')
165165
shared.check_call(cmd, env=clean_env())
@@ -538,7 +538,7 @@ def build_objects(self, build_dir):
538538
# Choose a chunk size that is large enough to avoid too many subprocesses
539539
# but not too large to avoid task starvation.
540540
# For now the heuristic is to split inputs by 2x number of cores.
541-
chunk_size = max(1, len(objects) // (2 * shared.get_num_cores()))
541+
chunk_size = max(1, len(objects) // (2 * utils.get_num_cores()))
542542
# Convert batches to commands.
543543
for cmd, srcs in batches.items():
544544
cmd = list(cmd)

tools/utils.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
# University of Illinois/NCSA Open Source License. Both these licenses can be
44
# found in the LICENSE file.
55

6+
import multiprocessing
67
import os
78
import shutil
89
import sys
@@ -112,6 +113,14 @@ def delete_contents(dirname, exclude=None):
112113
delete_file(entry)
113114

114115

116+
def get_num_cores():
117+
# Avoid os.cpu_count() here because it doesn't take into account thread affinity.
118+
# We could use os.process_cpu_count() but it's only available in 3.13.
119+
# multiprocessing.cpu_count() will use os.process_cpu_count() automatically
120+
# on 3.13 and above.
121+
return int(os.environ.get('EMCC_CORES', multiprocessing.cpu_count()))
122+
123+
115124
# TODO(sbc): Replace with functools.cache, once we update to python 3.9
116125
memoize = functools.lru_cache(maxsize=None)
117126

0 commit comments

Comments
 (0)