Mirrored from https://gitee.com/sunhf/gprMax.git
Synced 2025-08-08 15:27:57 +08:00
Moved memory estimating to Grid class, and now called from model_build_run module.
@@ -21,7 +21,9 @@ import decimal as d
 import inspect
 import sys
 
-from colorama import init, Fore, Style
+from colorama import init
+from colorama import Fore
+from colorama import Style
 init()
 import numpy as np
 from scipy import interpolate
@@ -32,7 +34,6 @@ from gprMax.exceptions import CmdInputError
 from gprMax.exceptions import GeneralError
 from gprMax.utilities import get_host_info
 from gprMax.utilities import human_size
-from gprMax.utilities import memory_usage
 from gprMax.utilities import round_value
 from gprMax.waveforms import Waveform
 
@@ -201,19 +202,6 @@ def process_singlecmds(singlecmds, G):
     if G.messages:
         print('Time window: {:g} secs ({} iterations)'.format(G.timewindow, G.iterations))
 
-    # Estimate memory (RAM) usage
-    memestimate = memory_usage(G)
-    # Check if model can be built and/or run on host
-    if memestimate > G.hostinfo['ram']:
-        raise GeneralError('Estimated memory (RAM) required ~{} exceeds {} detected!\n'.format(human_size(memestimate), human_size(hostinfo['ram'], a_kilobyte_is_1024_bytes=True)))
-
-    # Check if model can be run on specified GPU if required
-    if G.gpu is not None:
-        if memestimate > G.gpu.totalmem:
-            raise GeneralError('Estimated memory (RAM) required ~{} exceeds {} detected on specified {} - {} GPU!\n'.format(human_size(memestimate), human_size(G.gpu.totalmem, a_kilobyte_is_1024_bytes=True), G.gpu.deviceID, G.gpu.name))
-    if G.messages:
-        print('Estimated memory (RAM) required: ~{}'.format(human_size(memestimate)))
-
     # PML
     cmd = '#pml_cells'
     if singlecmds[cmd] is not None:
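The diff above shows only the removal side of the change: the host and GPU memory check is taken out of process_singlecmds, and the memory_usage import is dropped. Per the commit message, the estimate now belongs to the Grid class and is driven from the model_build_run module, neither of which appears in this hunk. The following is a minimal sketch of that arrangement, assuming hypothetical method names (estimate_memory, memory_check) and a simplified Grid stand-in; it illustrates the described refactor, not the actual gprMax API.

# Illustrative sketch only; class layout, method names, and the call site are
# assumptions based on the commit message, not the real gprMax implementation.

from gprMax.exceptions import GeneralError
from gprMax.utilities import human_size


class Grid:
    """Stand-in for the gprMax FDTD grid, holding host/GPU info set elsewhere."""

    def __init__(self, hostinfo, gpu=None):
        self.hostinfo = hostinfo   # e.g. {'ram': <bytes>} from get_host_info()
        self.gpu = gpu             # object with totalmem, deviceID, name, or None

    def estimate_memory(self):
        """Placeholder for the per-grid estimate that previously came from
        gprMax.utilities.memory_usage; the real calculation depends on grid
        dimensions and field arrays, which are not shown in this diff."""
        raise NotImplementedError

    def memory_check(self, memestimate):
        """Raise if the estimate exceeds host RAM or the chosen GPU's memory."""
        if memestimate > self.hostinfo['ram']:
            raise GeneralError('Estimated memory (RAM) required ~{} exceeds {} detected!\n'.format(
                human_size(memestimate),
                human_size(self.hostinfo['ram'], a_kilobyte_is_1024_bytes=True)))
        if self.gpu is not None and memestimate > self.gpu.totalmem:
            raise GeneralError('Estimated memory (RAM) required ~{} exceeds {} detected on specified {} - {} GPU!\n'.format(
                human_size(memestimate),
                human_size(self.gpu.totalmem, a_kilobyte_is_1024_bytes=True),
                self.gpu.deviceID, self.gpu.name))

A call site in model_build_run could then look roughly like the following, again as an assumption rather than the module's actual code:

    memestimate = G.estimate_memory()
    G.memory_check(memestimate)
    if G.messages:
        print('Estimated memory (RAM) required: ~{}'.format(human_size(memestimate)))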