Mirrored from https://gitee.com/sunhf/gprMax.git
Synced 2025-08-08 07:24:19 +08:00
Change logger.exception to logger.error
logger.exception is designed to be used inside an exception handler, since it adds the exception info to the logging message.
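For context, a minimal sketch of the distinction (illustrative only; the helper functions below are not part of gprMax): logger.error() emits a plain error record and suits pre-raise validation checks, whereas logger.exception() also attaches the active traceback, so it belongs inside an except block. Calling logger.exception() with no exception being handled logs a placeholder in place of a traceback (typically rendered as "NoneType: None"), which is the noise this commit removes.

# Illustrative sketch only - not gprMax source code.
import logging

logger = logging.getLogger(__name__)

def check_options(taskfarm: bool, geometry_fixed: bool) -> None:
    # No exception is being handled here, so logger.error() is correct
    # before raising the ValueError ourselves.
    if taskfarm and geometry_fixed:
        logger.error("The geometry fixed option cannot be used with MPI taskfarm.")
        raise ValueError

def read_model_file(path: str) -> str:
    try:
        with open(path) as f:
            return f.read()
    except OSError:
        # Inside an exception handler logger.exception() is appropriate:
        # it logs at ERROR level and appends the current traceback.
        logger.exception(f"Could not read model file {path}.")
        raise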
@@ -218,15 +218,15 @@ class SimulationConfig:
         ) # For depreciated Python blocks
 
         if self.taskfarm and self.geometry_fixed:
-            logger.exception("The geometry fixed option cannot be used with MPI taskfarm.")
+            logger.error("The geometry fixed option cannot be used with MPI taskfarm.")
             raise ValueError
 
         if self.gpu and self.opencl:
-            logger.exception("You cannot use both CUDA and OpenCl simultaneously.")
+            logger.error("You cannot use both CUDA and OpenCl simultaneously.")
             raise ValueError
 
         if self.mpi and hasattr(self.args, "subgrid") and self.args.subgrid:
-            logger.exception("You cannot use subgrids with MPI.")
+            logger.error("You cannot use subgrids with MPI.")
             raise ValueError
 
         # Each model in a simulation is given a unique number when the instance of ModelConfig is created
@@ -245,7 +245,7 @@ class SimulationConfig:
         # or when specified by the user.
 
         if args.show_progress_bars and args.hide_progress_bars:
-            logger.exception("You cannot both show and hide progress bars.")
+            logger.error("You cannot both show and hide progress bars.")
             raise ValueError
 
         self.general = {
@@ -302,7 +302,7 @@ class SimulationConfig:
         if (self.general["subgrid"] and self.general["solver"] == "cuda") or (
             self.general["subgrid"] and self.general["solver"] == "opencl"
         ):
-            logger.exception(
+            logger.error(
                 "You cannot currently use CUDA or OpenCL-based solvers with models that contain sub-grids."
             )
             raise ValueError
@@ -401,7 +401,7 @@ class SimulationConfig:
                 return dev
 
         if not found:
-            logger.exception(f"Compute device with device ID {deviceID} does " "not exist.")
+            logger.error(f"Compute device with device ID {deviceID} does " "not exist.")
             raise ValueError
 
     def get_model_config(self, model_num: Optional[int] = None) -> ModelConfig:
@@ -418,7 +418,7 @@ class SimulationConfig:
 
         model_config = self.model_configs[model_num]
         if model_config is None:
-            logger.exception(f"Cannot get ModelConfig for model {model_num}. It has not been set.")
+            logger.error(f"Cannot get ModelConfig for model {model_num}. It has not been set.")
             raise ValueError
 
         return model_config
@@ -168,7 +168,7 @@ class MPIContext(Context):
 
         requested_mpi_size = np.product(config.sim_config.mpi)
         if self.comm.size < requested_mpi_size:
-            logger.exception(
+            logger.error(
                 (
                     f"MPI_COMM_WORLD size of {self.comm.size} is too small for requested dimensions of"
                     f" {config.sim_config.mpi}. {requested_mpi_size} ranks are required."
@@ -290,7 +290,7 @@ class TaskfarmContext(Context):
             and config.sim_config.general["solver"] == "cuda"
             and executor.size - 1 > len(config.sim_config.devices["devs"])
         ):
-            logger.exception(
+            logger.error(
                 "Not enough GPU resources for number of "
                 "MPI tasks requested. Number of MPI tasks "
                 "should be equal to number of GPUs + 1."