Skip to content

Commit 6f4d889

Browse files
committed
feat: add --total-ram option for controlling visible system RAM in Comfy
Adds a new command-line argument `--total-ram` to limit the amount of system RAM that ComfyUI considers available, allowing users to simulate lower-memory environments. This enables more predictable behavior when testing or when running on systems with limited resources. Rationale: I run Comfy inside a Docker container. Using `mem_limit` doesn't hide the host's total system RAM from Comfy, so cache-pressure limits computed from the full host RAM lead to frequent out-of-memory errors. Adding this flag allows precise control over the amount of memory visible to Comfy. Signed-off-by: blob42 <[email protected]>
1 parent 8402c87 commit 6f4d889

File tree

2 files changed

+16
-4
lines changed

2 files changed

+16
-4
lines changed

comfy/cli_args.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,7 @@ def __call__(self, parser, namespace, values, option_string=None):
9090
parser.add_argument("--oneapi-device-selector", type=str, default=None, metavar="SELECTOR_STRING", help="Sets the oneAPI device(s) this instance will use.")
9191
parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize default when loading models with Intel's Extension for Pytorch.")
9292
parser.add_argument("--supports-fp8-compute", action="store_true", help="ComfyUI will act like if the device supports fp8 compute.")
93+
parser.add_argument("--total-ram", type=float, default=0, help="Maximum system RAM visible to comfy in GB (default 0: all)")
9394

9495
class LatentPreviewMethod(enum.Enum):
9596
NoPreviews = "none"

comfy/model_management.py

Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -192,8 +192,12 @@ def get_total_memory(dev=None, torch_total_too=False):
192192
if dev is None:
193193
dev = get_torch_device()
194194

195-
if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'):
196-
mem_total = psutil.virtual_memory().total
195+
if hasattr(dev, "type") and (dev.type == "cpu" or dev.type == "mps"):
196+
mem_total = 0
197+
if args.total_ram != 0:
198+
mem_total = args.total_ram * 1024 * 1024
199+
else:
200+
mem_total = psutil.virtual_memory().total
197201
mem_total_torch = mem_total
198202
else:
199203
if directml_enabled:
@@ -236,8 +240,15 @@ def mac_version():
236240
return None
237241

238242
total_vram = get_total_memory(get_torch_device()) / (1024 * 1024)
239-
total_ram = psutil.virtual_memory().total / (1024 * 1024)
240-
logging.info("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
243+
244+
total_ram = 0
245+
if args.total_ram != 0:
246+
total_ram = args.total_ram * (1024) # arg in GB
247+
else:
248+
total_ram = psutil.virtual_memory().total / (1024 * 1024)
249+
logging.info(
250+
"Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram)
251+
)
241252

242253
try:
243254
logging.info("pytorch version: {}".format(torch_version))

0 commit comments

Comments
 (0)