Signed-off-by: 葛峻恺 <202115006@mail.sdu.edu.cn>
This commit is contained in:
葛峻恺
2025-04-07 12:17:39 +00:00
Committed by Gitee
Parent 06ab241903
Commit 699f32f283
11 files changed, with 819 insertions and 0 deletions

1_model_generator.py  Normal file

@@ -0,0 +1,125 @@
'''@author: Junkai Ge (gejunkai@mail.sdu.edu.cn) Shandong university'''
#Generate gprMax .in input files for the 3D models
#gprMax needs to be pre-installed
import numpy as np
from utils.layers_generator import generate_layers_1d,assign_permittivity,assign_permittivity_with_smooth_transition,assign_integer_values
import pandas as pd
import os
import matplotlib.pyplot as plt
from config import Forward_Model_Config as cfg
from config import Path_Config as pcfg
#Initial modeling parameters
model_num=cfg.model_num #Number of models
depth=cfg.depth #Model depth unit:metre
air_depth=cfg.air_depth #Air layer thickness, unit:metre
grid_length=cfg.grid_length #grid length
surface_width=cfg.surface_width #surface width:metre
layers_range=cfg.layers_range #layers range
smooth_cell=cfg.smooth_cell #Model smoothing parameters
min_layer_thickness=cfg.min_layer_thickness #<=grid length
first_layer_minlenth=cfg.first_layer_minlenth #first layer minimum length
first_layer_maxlenth=cfg.first_layer_maxlenth #first layer maximum length
permittivity_range=cfg.permittivity_range #the range of permittivity
first_layer_eps_range=cfg.first_layer_eps_range #the range of permittivity of first layer
Time=cfg.Time #Collection time window (within the medium)
static_time=cfg.static_time #Collection time window (in the air)
frequency=cfg.frequency #Antenna frequency
Twindows=cfg.Twindows #total time
root = cfg.root #current path
#Calculate the number of grids
grid_x=int(surface_width/grid_length)
grid_y=int(surface_width/grid_length)
grid_z=int(depth/grid_length)
graid_layer_min=int(min_layer_thickness/grid_length)
first_layer_minsize=int(first_layer_minlenth/grid_length)
first_layer_maxsize=int(first_layer_maxlenth/grid_length)
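# With the default values in config.py (surface_width=3 m, depth=8 m, grid_length=0.02 m),
# this gives grid_x = grid_y = 150 and grid_z = 400 cells.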
#Modeling
for index in range(model_num):
layers_num=np.random.randint(layers_range[0],layers_range[1])#Number of generated layers
layer=generate_layers_1d(grid_z, layers_num, graid_layer_min,smooth_cell,first_layer_minsize,first_layer_maxsize)#Generated layer thickness and arrangement
eps_model=assign_permittivity_with_smooth_transition(layer, layers_num, first_layer_eps_range, permittivity_range, smooth_cell)#Generate random permittivity and smooth layer
layer_id=assign_integer_values(eps_model)#assign id
#Initialization of velocity field, calculation of velocity field
vel_model=np.zeros(grid_z)
for element in range(grid_z):
vel_model[element]=0.3/np.sqrt(eps_model[element])
z = np.zeros(grid_z)+grid_length
v = vel_model
t = np.cumsum(2*grid_length / v)
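# t holds the two-way travel time (in ns, since v is in m/ns) to the bottom of each cell;
# it is used below to resample the permittivity profile from depth onto a uniform time axis.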
Time_time = Time*1e9
new_time = np.arange(0, Time_time, Time_time/grid_z)
v_interpolated = np.interp(new_time, t, v)
# Cut the first grid-z points, and if there are not enough grid-z points, fill them with the last value
if len(v_interpolated) < grid_z:
v_interpolated = np.pad(v_interpolated, (0, grid_z - len(v_interpolated)), 'edge')
else:
v_interpolated = v_interpolated[:grid_z]
eps_interpolated=(0.3/v_interpolated)**2
# Draw a distribution diagram of permittivity/velocity over time
plt.figure()
plt.plot(new_time[:grid_z], eps_interpolated)
plt.xlabel('Time (s)')
plt.ylabel('Permittivity')
plt.title('Permittivity over time')
plt.grid(True)
plt.savefig('./dataset/eps_model_in_time/' + str(index + 1).zfill(5) + '.png')
#permittivity model over grid
plt.figure()
plt.plot(eps_model, 'o-', markersize=5)
plt.title("permittivity model over grid")
plt.ylim(0, 30)
plt.savefig('./dataset/eps_model_in_depth/' + str(index + 1).zfill(5) + '.png')
#Save the one-dimensional permittivity profile (as a function of time) to CSV
output_file='./dataset/eps_label_in_time/'+str(index+1).zfill(5)+'.csv'
model_txtDF = pd.DataFrame(eps_interpolated)
model_txtDF.to_csv(output_file,index=False)
#Write 'in' files
fl = open(pcfg.in_data_dir+str(index+1).zfill(5)+'_'+str(int(eps_model[1])).zfill(2)+'.in','w')
fl.write('#title: '+str(index+1).zfill(5)+'\n')
fl.write('#domain: '+str(surface_width)+' '+str(surface_width)+' '+str(depth+air_depth)+'\n')
fl.write('#dx_dy_dz: '+str(grid_length)+' '+str(grid_length)+' '+str(grid_length)+'\n')
fl.write('#pml_cells: 20 20 20 20 20 20\n')
fl.write('#time_window: '+str(Twindows)+'\n')
fl.write('#waveform: ricker 1 '+str(frequency)+' my_ricker\n')
fl.write('#hertzian_dipole: z '+str(surface_width/2)+' '+str(surface_width/2)+' '+str(depth)+' my_ricker\n')
fl.write('#rx: '+str(surface_width/2)+' '+str(surface_width/2)+' '+str(depth)+'\n')
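# Define an air material ('kongqi' means air) and fill the whole domain with it before the soil layers are written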
fl.write('#material: 1 0 1 0 kongqi\n')
fl.write('#box: 0 0 0 '+str(surface_width)+' '+str(surface_width)+' '+str(depth+air_depth)+' kongqi n \n')
#Assign material properties and modeling
current_eps = eps_model[0]
end_z = depth
material_idx=0
for i in range(1, grid_z):
if eps_model[i] != current_eps:
material_idx=material_idx+1
start_z = depth - i*grid_length
fl.write('#material: '+str(eps_model[i-1])+' 0 1 0 '+'material'+str(material_idx).zfill(5)+'\n')
fl.write('#box: 0 0 '+ str(start_z)+' '+str(surface_width)+' '+str(surface_width)+' '+str(end_z)+' '+'material'+str(material_idx).zfill(5)+' n \n')
current_eps = eps_model[i]
end_z = start_z
material_idx=material_idx+1
fl.write('#material: '+str(eps_model[-1])+' 0 1 0 '+'material'+str(material_idx).zfill(5)+'\n')
fl.write('#box: 0 0 0 '+str(surface_width)+' '+str(surface_width)+' '+str(end_z)+' '+'material'+str(material_idx).zfill(5)+' n \n')
fl.close()
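A minimal, self-contained sketch of the depth-to-time label conversion performed above, using a toy two-layer profile; the layer thicknesses and permittivities below are illustrative only and are not taken from the dataset:

import numpy as np

grid_length = 0.02                                                    # cell size in metres
eps_depth = np.concatenate([np.full(100, 9.0), np.full(300, 16.0)])  # toy 8 m two-layer model
v = 0.3 / np.sqrt(eps_depth)                                          # wave speed in m/ns (c = 0.3 m/ns)
t = np.cumsum(2 * grid_length / v)                                    # two-way travel time to each cell bottom (ns)
time_window_ns = 200.0                                                # matches cfg.Time = 200e-9 s
new_time = np.arange(0, time_window_ns, time_window_ns / eps_depth.size)
v_t = np.interp(new_time, t, v)                                       # velocity resampled onto a uniform time axis
eps_time = (0.3 / v_t) ** 2                                           # permittivity as a function of time
print(eps_time.shape)                                                 # (400,)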

2_forward_simulation.py  Normal file

@@ -0,0 +1,74 @@
import sys
sys.path.append('D:/my_gprmax/gprMax') # Add gprMax installation path to system for module import
import os
from gprMax.gprMax import api
import numpy as np
from tools.plot_Bscan import get_output_data, mpl_plot
import matplotlib.pyplot as plt
import scipy.ndimage
from scipy.signal import tukey
from config import Forward_Model_Config as cfg
from config import Path_Config as pcfg
# Get the current script directory
path = os.getcwd()
root = pcfg.in_data_dir # Specify the input file directory
files = os.listdir(root) # Get the list of files in the directory
data_per_ns = cfg.data_per_ns # Sampling rate per nanosecond
direct_wave_time = cfg.direct_wave_time*1e9 # Direct wave duration (ns)
time = cfg.Time*1e9 # Time window (ns)
static_time = cfg.static_time*1e9 # Static correction time (ns)
for file in files:
if file.endswith('.in'): # Process only .in files
filename = root + file # Full path of the input file
fi = filename[:-3] # Remove .in extension while keeping the path
# Run gprMax simulation (geometry_only=False means full simulation)
api(filename, n=1, gpu={0}, geometry_only=False)
filename_b = fi + '.out'
rxnumber = 1
rxcomponent = 'Ez'
# Retrieve reflected wave data
outputdata, dt = get_output_data(filename_b, rxnumber, rxcomponent)
h = len(outputdata)
# Load the direct-wave trace for background removal; the file is selected by the
# first-layer permittivity encoded in the .in filename (characters 6:8)
direct_wave_data = np.loadtxt('./dataset/direct_wave_generation/out_data/000' + file[6:8] + '.txt')
outputdata = outputdata - direct_wave_data # Remove direct wave background
# Resample the data to match the required time resolution
outputdata = scipy.ndimage.zoom(outputdata, (int((time + static_time) * data_per_ns) / h), order=1)
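# With the default config (Time=200 ns, static_time=9 ns, data_per_ns=10), the trace is resampled
# to (200+9)*10 = 2090 samples; the static-correction step below then drops the first 9*10 = 90
# samples and the direct-wave zeroing blanks the first 35*10 = 350 samples.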
# Apply static correction
static_output = outputdata[int(static_time * data_per_ns):]
outputdata = static_output
# Zero out the initial direct wave
outputdata[:int(direct_wave_time * data_per_ns)] = 0
# Apply dewow filter to remove low-frequency drift
dewow = np.mean(outputdata[int(direct_wave_time * data_per_ns):])
outputdata[int(direct_wave_time * data_per_ns):] -= dewow
# Save processed data
np.savetxt(pcfg.out_data_dir + file[:-6] + '.txt', outputdata, delimiter=' ')
# Plot and save the processed data
plt.figure()
plt.plot(outputdata, linestyle='-', color='b')
plt.title("1D Data Curve")
plt.xlabel("Index")
plt.ylabel("Value")
plt.grid(True)
plt.savefig('./dataset/out_data_img/' + file[:-6] + '.png')
# Remove original files after processing
os.remove(filename)
os.remove(filename_b)

3_combine_dataset.py  Normal file

@@ -0,0 +1,92 @@
import pandas as pd
import numpy as np
import os
import scipy.ndimage
from config import Forward_Model_Config as cfg
from config import Path_Config as pcfg
# Parameters
data_per_ns = cfg.data_per_ns # data grid num per ns
DATA_LENGTH = cfg.data_length # Target length after interpolation (unit: grid number)
THRESHOLD = cfg.filter_threthold # Minimum absolute value threshold for valid data
# Input and output paths
INPUT_DATA_FOLDER = pcfg.INPUT_DATA_FOLDER
INPUT_LABEL_FOLDER = pcfg.INPUT_LABEL_FOLDER
DATA_OUTPUT_FILE = pcfg.dataset_path
LABEL_OUTPUT_FILE = pcfg.labelset_path
# Get all data and label filenames
data_files = sorted([f for f in os.listdir(INPUT_DATA_FOLDER) if f.endswith('.txt')])
label_files = sorted([f for f in os.listdir(INPUT_LABEL_FOLDER) if f.endswith('.csv')])
# Extract valid file IDs
data_ids = set(os.path.splitext(f)[0] for f in data_files)
label_ids = set(os.path.splitext(f)[0] for f in label_files)
valid_ids = sorted(data_ids & label_ids) # Only process matching data-label pairs
# Storage for processed data and labels
all_data = []
all_labels = []
for data_id in valid_ids:
try:
# Define file paths
data_path = os.path.join(INPUT_DATA_FOLDER, data_id + ".txt")
label_path = os.path.join(INPUT_LABEL_FOLDER, data_id + ".csv")
# Load data file (1D signal data)
raw_data = np.loadtxt(data_path, delimiter=",", skiprows=0)
processed_data = raw_data[:] # Copy the trace (the direct wave was already removed during forward simulation)
# Interpolate data to the target length
processed_data = scipy.ndimage.zoom(processed_data, DATA_LENGTH / processed_data.shape[0], order=1)
# Skip data if the maximum absolute value is below the threshold
if np.max(np.abs(processed_data)) < THRESHOLD:
print(f"Skipping {data_id} due to low signal amplitude.")
continue
# Append processed data
all_data.append(processed_data)
# Load label file and drop the header row (first row) written by pandas
label = np.delete(np.loadtxt(label_path, delimiter=",", skiprows=0), [0], axis=0)
all_labels.append(label)
except Exception as e:
print(f"Error processing file {data_id}: {e}")
continue
# Convert lists to 2D arrays
combined_data = np.column_stack(all_data) if all_data else np.array([])
combined_labels = np.column_stack(all_labels) if all_labels else np.array([])
# Save processed data
if combined_data.size > 0:
data_header = np.array([int(id_) for id_ in valid_ids]).reshape(1, -1)
if data_header.shape[1] != combined_data.shape[1]:
print(f"Warning: Mismatch between data header ({data_header.shape[1]}) and data columns ({combined_data.shape[1]}). Trimming header.")
data_header = data_header[:, :combined_data.shape[1]]
combined_data_with_header = np.vstack((data_header, combined_data))
np.savetxt(DATA_OUTPUT_FILE, combined_data_with_header, delimiter=",", fmt="%g")
print(f"Processed data saved to {DATA_OUTPUT_FILE}")
else:
print("No valid data files found.")
# Save processed labels
if combined_labels.size > 0:
label_header = np.array([int(id_) for id_ in valid_ids]).reshape(1, -1)
if label_header.shape[1] != combined_labels.shape[1]:
print(f"Warning: Mismatch between label header ({label_header.shape[1]}) and label columns ({combined_labels.shape[1]}). Trimming header.")
label_header = label_header[:, :combined_labels.shape[1]]
combined_labels_with_header = np.vstack((label_header, combined_labels))
np.savetxt(LABEL_OUTPUT_FILE, combined_labels_with_header, delimiter=",", fmt="%g")
print(f"Processed labels saved to {LABEL_OUTPUT_FILE}")
else:
print("No valid label files found.")

4_gssi_data_convert.py  Normal file

@@ -0,0 +1,32 @@
import numpy as np
import pandas as pd
from readgssi.dzt import readdzt
from scipy.signal import tukey
from config import Field_data_test_Config as cfg
from config import Path_Config as pcfg
from utils.plot import plot_BSCAN_data
if __name__ == "__main__":
# File path and parameters
TEST_FILE = pcfg.TEST_FILE # Input DZT file
extract_time_grid =cfg.extract_time_grid # Extracted time range (grid size)
# Read GPR data
result = readdzt(infile=TEST_FILE)
data_result = result[1]
data = data_result[0]
data = data[2:extract_time_grid, :] # Trim data within the specified time range
# Normalize data
data = data / np.max(np.abs(data))
# Save raw data as CSV
pd.DataFrame(data).to_csv(pcfg.CONVERTED_TEST_FILE, index=False)
# Plot and save the B-scan image of the converted data
plot_BSCAN_data(data, pcfg.CONVERTED_TEST_FILE_img, line_length=cfg.distance, time_length=cfg.time_window, ratio=0.3)

5_data_preprocess.py  Normal file

@@ -0,0 +1,68 @@
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.ndimage
from readgssi.dzt import readdzt
from scipy.signal import tukey
from config import Field_data_test_Config as cfg
from config import Path_Config as pcfg
from utils.plot import plot_BSCAN_data
def dewow_windowed(in_array, window_size=40):
"""
Apply a sliding window mean subtraction (dewowing) to remove low-frequency noise.
"""
if len(in_array) < window_size:
return in_array - np.mean(in_array)
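# Sliding-window mean via cumulative sums: mean_vals[j] is the average of in_array[j:j+window_size];
# the edges are padded below so the result has the same length as the input trace.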
cumsum = np.cumsum(np.insert(in_array, 0, 0))
mean_vals = (cumsum[window_size:] - cumsum[:-window_size]) / window_size
mean_vals = np.concatenate((
np.full(window_size // 2, mean_vals[0]),
mean_vals,
np.full(len(in_array) - len(mean_vals) - window_size // 2, mean_vals[-1])
))
return in_array - mean_vals
def remove_bad_channels(data, remove_start, remove_end):
"""
Remove unwanted channels (columns) from a 2D array.
"""
remove_start = max(0, remove_start)
remove_end = min(data.shape[1], remove_end)
return np.delete(data, np.s_[remove_start:remove_end], axis=1)
def taper_field(in_array):
"""
Apply a tapering window to smooth signal edges and reduce artifacts.
"""
length = len(in_array)
window = np.ones(length)
taper_length = in_array.shape[0] // 2
taper = tukey(2 * taper_length, 0.7)[:taper_length]
window[:taper_length] = taper
window = window ** 3
return in_array * window
if __name__ == "__main__":
TEST_FILE = pcfg.CONVERTED_TEST_FILE
bad_trace=cfg.bad_trace
detection_distance=cfg.detection_distance
time_window_length=cfg.time_window_length
data = np.loadtxt(TEST_FILE, delimiter=",", skiprows=0)[1:, :] # Remove first row
data = remove_bad_channels(data, bad_trace[0], bad_trace[1])
data = np.apply_along_axis(dewow_windowed, 0, data)
data = scipy.ndimage.zoom(data, (time_window_length/data.shape[0],detection_distance/data.shape[1]), order=1)
data = np.apply_along_axis(taper_field, 0, data)
data = np.apply_along_axis(taper_field, 0, data)
data = data / np.max(np.abs(data))
# Save processed data
plot_BSCAN_data(data, pcfg.PROCESSED_TEST_FILE_img, line_length=100, time_length=200, ratio=0.5)
pd.DataFrame(data).to_csv(pcfg.PROCESSED_TEST_FILE, index=False)

6_extract_impulse.py  Normal file

@@ -0,0 +1,83 @@
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from config import Field_data_test_Config as cfg
from config import Path_Config as pcfg
def shift_data_to_end(data, n):
"""
Shift the first n values of a 1D data array to the end.
Parameters:
data (list or numpy array): 1D data array
n (int): Number of elements to shift
Returns:
numpy array: Shifted data
"""
if n < 0 or n > len(data):
raise ValueError("Shift length n must be between 0 and the data length.")
return np.concatenate((data[n:], data[:n]))
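# Example: shift_data_to_end(np.array([1, 2, 3, 4, 5]), 2) -> array([3, 4, 5, 1, 2])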
# Step 1: Read 1D data from CSV or TXT file
def read_file(file_path, idx):
if file_path.endswith('.csv'):
data = pd.read_csv(file_path, header=None) # Assuming no column names
one_d_data = data.iloc[1:, idx].values # Read data from the idx-th column
elif file_path.endswith('.txt'):
data = np.loadtxt(file_path) # Assuming TXT file contains only numeric values
one_d_data = data if data.ndim == 1 else data[:, 0] # Ensure it's 1D
else:
raise ValueError("Unsupported file format. Please provide a .csv or .txt file.")
return one_d_data
# Step 2: Normalize data by dividing by its maximum absolute value
def normalize_data(data):
max_abs_value = np.max(np.abs(data))
return data if max_abs_value == 0 else data / max_abs_value # Avoid division by zero
# Step 3: Interpolate data to a length of 1000
def interpolate_data(data, target_length=1000):
original_length = len(data)
x_original = np.linspace(0, 1, original_length)
x_target = np.linspace(0, 1, target_length)
interpolator = interp1d(x_original, data, kind='cubic') # Cubic interpolation
return interpolator(x_target)
# Step 4: Save processed data to a CSV file
def save_to_csv(data, output_path):
pd.DataFrame(data).to_csv(output_path, index=False, header=False)
# Step 5: Plot original and interpolated data
def plot_data(original_data, interpolated_data):
plt.figure(figsize=(10, 5))
plt.plot(original_data, label='Original Data', marker='o', linestyle='--', alpha=0.7)
plt.plot(np.linspace(0, len(original_data)-1, len(interpolated_data)), interpolated_data, label='Interpolated Data', linestyle='-', linewidth=2)
plt.legend()
plt.xlabel('Index')
plt.ylabel('Value')
plt.title('Data Normalization and Interpolation')
plt.grid(alpha=0.3)
plt.show()
# Main execution
if __name__ == "__main__":
input_file = pcfg.PROCESSED_TEST_FILE # Input file path
output_csv = pcfg.field_impulse # Output file path
idx = cfg.refer_wave_idx # Column index to read data from
static_time=cfg.static_time
wavelet_range=cfg.wavelet_range
data = read_file(input_file, idx)
normalized_data = normalize_data(data)
# Zero out unwanted data regions
normalized_data[wavelet_range[1]:] = 0 # Set values after the source wave to zero
normalized_data[0:wavelet_range[0]] = 0 # Set values before the source wave to zero
interpolated_data = interpolate_data(normalized_data,cfg.time_window_length)
interpolated_data = shift_data_to_end(interpolated_data, static_time)
save_to_csv(interpolated_data, output_csv)
plot_data(normalized_data, interpolated_data)

7_network_train.py  Normal file

@@ -0,0 +1,79 @@
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid, save_image
import time
import pandas as pd
from torchsummary import summary
from Network.MyDataset import MyDataset
from utils.train_val_lr import train, validate, adjust_learning_rate
from Network.Model import Model
from config import Network_train_Config as cfg
from config import Path_Config as pcfg
# File paths
data_file = pcfg.dataset_path
label_file = pcfg.labelset_path
impulse_field_file = pcfg.field_impulse
impulse_sim_file = pcfg.sim_impulse
if __name__ == '__main__':
BATCH_SIZE = cfg.BATCH_SIZE
LR = cfg.LR # Initial learning rate
EPOCHS = cfg.EPOCHS
val_loss_min = cfg.val_loss_min # Initial high value for validation loss tracking
# Model initialization
model = Model(inplanes=2, outplanes=1, layers=cfg.network_layers).cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=LR)
loss_func = nn.MSELoss(reduction='mean')
# Create a CSV file to log training progress
df = pd.DataFrame(columns=['epoch', 'train Loss', 'val Loss', 'learning rate'])
df.to_csv(pcfg.train_val_loss, index=False)
# Load dataset
dataset = MyDataset(data_file, label_file, impulse_field_file, impulse_sim_file, mode='train', check=False)
train_size = int(len(dataset) * cfg.dataset_Proportion)
val_size = len(dataset) - train_size
train_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])
# Create data loaders
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=cfg.num_workers, pin_memory=True)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=cfg.num_workers, pin_memory=True)
# TensorBoard writer
writer = SummaryWriter(r'Log')
print('Dataset loaded successfully.')
for epoch in range(EPOCHS):
epoch_start_time = time.time()
print(f'Starting epoch {epoch}')
# Adjust learning rate
LR = adjust_learning_rate(optimizer, epoch, LR)
# Training and validation
train_loss = train(train_loader, model, loss_func, optimizer, epoch)
val_loss = validate(val_loader, model, loss_func)
print(f'Validation Loss: {val_loss:.6f}')
# Save best model
if val_loss < val_loss_min:
torch.save(model.state_dict(), pcfg.BEST_MODEL_PATH)
val_loss_min = val_loss
# Periodically save model
if epoch % cfg.save_period == 0:
torch.save(model.state_dict(), pcfg.LATEST_MODEL_PATH)
# Log training details
log_data = pd.DataFrame([[epoch, train_loss, val_loss, LR]])
log_data.to_csv(pcfg.train_val_loss, mode='a', header=False, index=False)
print(f'Epoch {epoch} completed in {time.time() - epoch_start_time:.2f} seconds.')

8_prediction.py  Normal file

@@ -0,0 +1,62 @@
import os
import torch
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from Network.MyDataset import *
from utils.train_val_lr import train, validate, adjust_learning_rate
from Network.Model import *
from scipy.signal import butter, lfilter, sosfilt
from config import Network_prediction_Config as cfg
from config import Path_Config as pcfg
from utils.plot import plot_permittivity_constant
# Set parameters
BATCH_SIZE = cfg.BATCH_SIZE
TEST_FILE = pcfg.PROCESSED_TEST_FILE
inversion_time_result_file=pcfg.inversion_time_result_file
inversion_time_result_img= pcfg.inversion_time_result_img
MODEL_PATH = pcfg.LATEST_MODEL_PATH
impulse_field_file = pcfg.field_impulse
impulse_sim_file = pcfg.sim_impulse
smooth_window_size = cfg.smooth_window_size
max_permittivity=cfg.max_permittivity
initial_params = cfg.initial_params
# Load the pre-trained model
model = Model(inplanes=2, outplanes=1, layers=cfg.network_layers).cuda()
model.load_state_dict(torch.load(MODEL_PATH))
model.eval()
# Load dataset
dataset = MyDataset(TEST_FILE, TEST_FILE, impulse_field_file, impulse_sim_file, mode='apply', initial_params=initial_params)
test_loader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=cfg.num_workers, pin_memory=True)
# Initialize results container
results = []
# Inference without gradient tracking
with torch.no_grad():
for idx, (data_data, label) in enumerate(test_loader):
data_data = data_data.cuda()
# Model inference
data_data_out = model(data_data.type(torch.cuda.FloatTensor))
# Move output to CPU and squeeze dimensions
data_data_out = data_data_out.cpu().numpy().squeeze()
results.append(data_data_out)
# Concatenate and save final results
final_result = np.column_stack(results)
# Apply smoothing filter (moving average)
final_result = np.apply_along_axis(lambda x: np.convolve(x, np.ones(smooth_window_size)/smooth_window_size, mode='valid'), axis=1, arr=final_result)
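# Note: mode='valid' shortens each row by smooth_window_size-1 samples; the smoothing acts across traces (axis=1).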
pd.DataFrame(final_result* max_permittivity).to_csv(inversion_time_result_file, index=False)
# Plot results
plot_permittivity_constant(final_result * max_permittivity, inversion_time_result_img, line_length=cfg.distance, time_length=cfg.time_window)

9_time_depth_convert.py  Normal file

@@ -0,0 +1,70 @@
import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from config import Network_prediction_Config as cfg
from config import Path_Config as pcfg
from utils.plot import plot_depth_permittivity_constant
def convert2depth(dielectric_img, dt=1e-9, dz_interval=0.05, max_samples=399):
"""
Convert a time-domain permittivity map into a depth-domain map.
:param dielectric_img: 2D NumPy array (time, distance), representing relative permittivity values.
:param dt: Time sampling interval (seconds), default is 1e-9 s.
:param dz_interval: Depth resampling interval (meters), default is 0.05 m.
:param max_samples: Maximum time samples per profile, default is 399.
Process:
1. Compute local wave velocity using: v = c / sqrt(ε), where c=3e8 m/s.
2. Compute cumulative depth: depth_z = cumsum(v_line * dt) / 2.
3. Resample permittivity to a uniform depth scale using interpolation.
:return: (depth_converted_img, max_depth)
- depth_converted_img: 2D array with depth as rows and distance as columns.
- max_depth: the maximum depth over all profiles, in metres (for reference).
"""
c = 3e8 # Speed of light in vacuum (m/s)
n_time, n_x = dielectric_img.shape
depth_img_list = []
z_stack_length = []
for i in range(n_x):
eps_line = dielectric_img[:, i][:max_samples] # Extract permittivity values
v_line = c / np.sqrt(eps_line) # Compute local wave velocity
depth_z = np.cumsum(v_line * dt) / 2 # Compute cumulative depth
# Insert initial depth and permittivity value for accurate interpolation
depth_z = np.insert(depth_z, 0, 0)
eps_line = np.insert(eps_line, 0, eps_line[0])
max_depth = np.max(depth_z)
dest_depth = np.arange(0, max_depth, dz_interval) # Define uniform depth scale
# Interpolate permittivity values to uniform depth scale
interp_func = interp1d(depth_z, eps_line, kind='linear', bounds_error=False, fill_value=np.nan)
eps_interp = interp_func(dest_depth)
depth_img_list.append(eps_interp)
z_stack_length.append(len(dest_depth))
# Standardize profile length with NaN padding for consistency
max_length = np.max(z_stack_length)
depth_img_padded = [np.pad(v, (0, max_length - len(v)), constant_values=np.nan) for v in depth_img_list]
# Convert list to NumPy array and transpose so rows = depth, columns = distance
depth_converted_img = np.array(depth_img_padded).T
max_depth = np.max(z_stack_length)
return depth_converted_img, max_depth * dz_interval
if __name__ == "__main__":
# Load data and apply depth conversion
data_file = pcfg.inversion_time_result_file
path = pcfg.inversion_depth_result_img
data = np.delete(np.loadtxt(data_file, delimiter=',', skiprows=0), [0], axis=0)
depth_converted_img, max_depth = convert2depth(data, dt=cfg.dt, dz_interval=cfg.dz_interval, max_samples=cfg.max_samples)
plot_depth_permittivity_constant(depth_converted_img, path, line_length=100, time_length=max_depth)
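A quick sanity check of the time-to-depth mapping described in the convert2depth docstring, using the function's default dt and dz_interval rather than the values passed in from config.py:

import numpy as np

eps = 9.0                      # constant relative permittivity
v = 3e8 / np.sqrt(eps)         # local wave velocity: 1e8 m/s
dt = 1e-9                      # default time sampling interval (s)
depth_per_sample = v * dt / 2  # two-way time converted to one-way depth per sample
print(depth_per_sample)        # 0.05 m, i.e. exactly one default dz_interval per sample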

config.py  Normal file

@@ -0,0 +1,112 @@
# config.py
import os
class Path_Config:
#path related to dataset
path = os.getcwd()
in_data_dir=path+ '/dataset/in_data/'
out_data_dir=path+ '/dataset/out_data/'
INPUT_DATA_FOLDER = './dataset/out_data/'
INPUT_LABEL_FOLDER = './dataset/eps_label_in_time/'
dataset_path="./dataset/data.csv"
labelset_path="./dataset/label.csv"
#path related to field data and inversion result
test_file_name='1350HENGDUANMIAN.DZT'
TEST_FILE = './field_data/'+test_file_name
CONVERTED_TEST_FILE=TEST_FILE[:-4]+'_RAW.csv'
CONVERTED_TEST_FILE_img='./IMG/'+test_file_name[:-4]+'_RAW.png'
PROCESSED_TEST_FILE='./field_data/'+test_file_name[:-4]+'_PROCESSED.csv'
PROCESSED_TEST_FILE_img= './IMG/'+test_file_name[:-4]+'_PROCESSED.png'
inversion_time_result_file='./time_result_csv/'+test_file_name[:-4]+'_time_result.csv'
inversion_time_result_img= './IMG/'+test_file_name[:-4]+'_time_result.png'
inversion_depth_result_img= './IMG/'+test_file_name[:-4]+'_depth_result.png'
#path related to training
field_impulse='./impulse/reflection_impulse_field_standard.csv'
sim_impulse='./impulse/impulse_simulated_standard.csv'
train_val_loss='./SAVE/train_val_loss.csv'
LATEST_MODEL_PATH = 'SAVE/latest_model.pt'
BEST_MODEL_PATH= 'SAVE/best_model.pt'
class Forward_Model_Config:
# parameters about forward modeling for dataset
model_num=5 # Number of models
depth=8 # Model depth unit:M
air_depth=0.5 # Air layer thickness, unit:M
grid_length=0.02 # grid length
surface_width=3 # surface width:M
layers_range=[2,7] # layers range
smooth_cell=12 # Model smoothing parameters
min_layer_thickness=0.02 # <=grid length
first_layer_minlenth=0.8 # first layer minimum length (m)
first_layer_maxlenth=3.5 # first layer maximum length (m)
permittivity_range=(3,30) # the range of permittivity
first_layer_eps_range=(8,30) # the range of permittivity of first layer
max_permittivity=30 # the max permittivity
Time=200e-9 # Collection time window (within the medium)
static_time=9e-9 # Collection time window (in the air)
frequency=0.7e8 # Antenna central frequency
Twindows=Time+static_time # total time
data_per_ns=10 # Sampling rate per nanosecond
direct_wave_time=35e-9 # Direct wave duration (unit: s)
data_length=1000 # Target length after interpolation (unit: grid number)
filter_threthold=0.0015 # Minimum absolute value threshold for valid data
root = os.getcwd() # file path
class Field_data_test_Config:
# parameters about field data
distance=100 # unit: m
time_window=200 # unit: ns
extract_time_grid=317 # Extracted time range (grid size)
bad_trace=[3900,3901] # bad trace range
detection_distance=1000 # unit: grid
time_window_length=Forward_Model_Config.data_length # unit: grid
refer_wave_idx=920 # Reference trace index
wavelet_range=[215,325] # Sample range of the reference wavelet (grid indices)
static_time=wavelet_range[0]
class Network_train_Config:
BATCH_SIZE = 10 # batch size of training
LR = 0.001 # learning rate
EPOCHS = 60 # max training epoch
val_loss_min = 50 # Initial value for tracking the best validation loss
network_layers=[8, 8, 8, 8] # network layers
lr_decrease_rate=0.9 # learning rate decrease rate
dataset_Proportion=0.9 # Proportion of training set
save_period=10 # Save interval (unit: epoch)
num_workers=0 # num_workers
noise_coff=0.03 # Add intensity of noise
shift_distance=53 # Convolutional correction distance (unit: grid)
max_permittivity=Forward_Model_Config.max_permittivity
data_length=Forward_Model_Config.data_length
class Network_prediction_Config:
initial_params = ([21, 21, 21, 21, 21], [200, 200, 200, 200, 200]) #Set initial layered model
#example: initial_params = ([layer1_permittivity, layer2_permittivity, ...], [layer1_thickness, layer2_thickness, ...])
smooth_window_size = 20
######
######
######
num_workers= Network_train_Config.num_workers
dz_interval=Forward_Model_Config.grid_length
network_layers=Network_train_Config.network_layers
max_permittivity=Forward_Model_Config.max_permittivity
num_workers=Network_train_Config.num_workers
dt=Forward_Model_Config.Time/Forward_Model_Config.data_length
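# With the default Forward_Model_Config values this is 200e-9 s / 1000 samples = 0.2 ns per sample.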
max_samples=Forward_Model_Config.data_length
distance=Field_data_test_Config.distance
time_window=Field_data_test_Config.time_window
BATCH_SIZE = 1

requirements.txt  Normal file

@@ -0,0 +1,22 @@
colorama==0.4.5
Cython==3.0.12
geopy==2.4.1
h5py==3.10.0
matplotlib==3.5.3
mpi4py==4.0.3
numpy==2.2.4
obspy==1.4.0
pandas==1.4.3
psutil==5.9.2
pycuda==2025.1
pynmea2==1.19.0
pytz==2022.2.1
scipy==1.10.1
setuptools==61.2.0
terminaltables==3.1.10
torch==1.11.0
torchsummary==1.5.1
torchvision==0.12.0
tqdm==4.64.1