Checking GPU allocation when using MPI.

这个提交包含在:
craig-warren
2020-04-06 16:11:41 +01:00
父节点 ddb3f236e0
当前提交 73463a1d88
共有 2 个文件被更改,包括 3 次插入、2 次删除

查看文件

@@ -123,7 +123,7 @@ class MPIContext(Context):
  model_config = config.ModelConfig()
  # Set GPU deviceID according to worker rank
  if config.sim_config.general['cuda']:
-     gpu = config.sim_config.set_model_gpu(deviceID=self.rank - 1)
+     gpu = config.sim_config.cuda['gpus'][self.rank - 1]
      model_config.cuda = {'gpu': gpu,
                           'snapsgpu2cpu': False}
  config.model_configs = model_config

查看文件

@@ -265,7 +265,8 @@ class ModelBuildRun:
  logger.warning(Fore.RED + f"You have specified more threads ({config.get_model_config().ompthreads}) than available physical CPU cores ({config.sim_config.hostinfo['physicalcores']}). This may lead to degraded performance." + Style.RESET_ALL)
  # Print information about any GPU in use
  elif config.sim_config.general['cuda']:
-     logger.basic(f"GPU for solving: {config.get_model_config().cuda['gpu'].deviceID} - {config.get_model_config().cuda['gpu'].name}\n")
+     import platform
+     logger.basic(f"GPU (on {platform.node()}) for solving: {config.get_model_config().cuda['gpu'].deviceID} - {config.get_model_config().cuda['gpu'].name}\n")
  # Prepare iterator
  if config.sim_config.general['progressbars']: