Moved memory estimation to the Grid class; it is now called from the model_build_run module.

Author: Craig Warren
Date:   2018-07-09 11:13:45 +01:00
Parent: 607ace46c0
Commit: 25e7d4b533


@@ -21,7 +21,9 @@ import decimal as d
 import inspect
 import sys
-from colorama import init, Fore, Style
+from colorama import init
+from colorama import Fore
+from colorama import Style
 init()
 import numpy as np
 from scipy import interpolate
@@ -32,7 +34,6 @@ from gprMax.exceptions import CmdInputError
 from gprMax.exceptions import GeneralError
 from gprMax.utilities import get_host_info
 from gprMax.utilities import human_size
-from gprMax.utilities import memory_usage
 from gprMax.utilities import round_value
 from gprMax.waveforms import Waveform
@@ -201,19 +202,6 @@ def process_singlecmds(singlecmds, G):
     if G.messages:
         print('Time window: {:g} secs ({} iterations)'.format(G.timewindow, G.iterations))
-    # Estimate memory (RAM) usage
-    memestimate = memory_usage(G)
-    # Check if model can be built and/or run on host
-    if memestimate > G.hostinfo['ram']:
-        raise GeneralError('Estimated memory (RAM) required ~{} exceeds {} detected!\n'.format(human_size(memestimate), human_size(G.hostinfo['ram'], a_kilobyte_is_1024_bytes=True)))
-    # Check if model can be run on specified GPU if required
-    if G.gpu is not None:
-        if memestimate > G.gpu.totalmem:
-            raise GeneralError('Estimated memory (RAM) required ~{} exceeds {} detected on specified {} - {} GPU!\n'.format(human_size(memestimate), human_size(G.gpu.totalmem, a_kilobyte_is_1024_bytes=True), G.gpu.deviceID, G.gpu.name))
-    if G.messages:
-        print('Estimated memory (RAM) required: ~{}'.format(human_size(memestimate)))
     # PML
     cmd = '#pml_cells'
     if singlecmds[cmd] is not None:
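
For context, a minimal sketch of what the relocated logic plausibly looks like after this commit: the estimate and the host/GPU checks become behaviour of the grid object itself, invoked from the model_build_run module once the grid is fully configured. The method names memory_check() and memory_estimate() below are assumptions for illustration only; the messages and checks are taken from the block removed above, not from the actual post-commit gprMax source.

    # Illustrative sketch only: method names and structure are assumed.
    from gprMax.exceptions import GeneralError
    from gprMax.utilities import human_size

    class FDTDGrid:
        # ... existing grid attributes: hostinfo, gpu, messages, etc. ...

        def memory_check(self):
            """Estimate memory (RAM) use and check that the model fits on
            the host, and on the specified GPU if there is one."""
            memestimate = self.memory_estimate()  # assumed per-grid estimator

            # Check if model can be built and/or run on host
            if memestimate > self.hostinfo['ram']:
                raise GeneralError('Estimated memory (RAM) required ~{} exceeds {} detected!\n'.format(human_size(memestimate), human_size(self.hostinfo['ram'], a_kilobyte_is_1024_bytes=True)))

            # Check if model can be run on specified GPU if required
            if self.gpu is not None and memestimate > self.gpu.totalmem:
                raise GeneralError('Estimated memory (RAM) required ~{} exceeds {} detected on specified {} - {} GPU!\n'.format(human_size(memestimate), human_size(self.gpu.totalmem, a_kilobyte_is_1024_bytes=True), self.gpu.deviceID, self.gpu.name))

            if self.messages:
                print('Estimated memory (RAM) required: ~{}'.format(human_size(memestimate)))

Moving the check out of process_singlecmds means it no longer runs during input-command parsing; model_build_run can instead call something like G.memory_check() just before allocating the field arrays, at which point every command affecting the grid size has already been processed.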