From 35f79e089979f7ea58ae1172816183282ca27395 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=91=9B=E5=B3=BB=E6=81=BA?= <202115006@mail.sdu.edu.cn>
Date: Mon, 7 Apr 2025 12:34:47 +0000
Subject: [PATCH] Add network model and dataset modules
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 葛峻恺 <202115006@mail.sdu.edu.cn>
---
 Network/Model.py     | 137 ++++++++++++++
 Network/MyDataset.py | 171 ++++++++++++++++++
 2 files changed, 308 insertions(+)
 create mode 100644 Network/Model.py
 create mode 100644 Network/MyDataset.py

diff --git a/Network/Model.py b/Network/Model.py
new file mode 100644
index 0000000..bd588e7
--- /dev/null
+++ b/Network/Model.py
@@ -0,0 +1,137 @@
+import torch
+import torch.nn as nn
+
+# 1D convolution with kernel_size=5 and "same"-style padding (padding=2)
+def conv1d_5(inplanes, outplanes, stride=1):
+    return nn.Conv1d(inplanes, outplanes, kernel_size=5, stride=stride,
+                     padding=2, bias=False)
+
+# Transformer-based self-attention module
+class TransformerLayer(nn.Module):
+    def __init__(self, embed_dim, num_heads, ff_dim, dropout=0.1):
+        super(TransformerLayer, self).__init__()
+        self.attention = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout, batch_first=True)
+        self.norm1 = nn.LayerNorm(embed_dim)
+        self.ffn = nn.Sequential(
+            nn.Linear(embed_dim, ff_dim),
+            nn.ReLU(),
+            nn.Linear(ff_dim, embed_dim)
+        )
+        self.norm2 = nn.LayerNorm(embed_dim)
+        self.dropout = nn.Dropout(dropout)
+
+    def forward(self, x):
+        x = x.permute(0, 2, 1)  # [B, C, L] -> [B, L, C]
+        attn_output, _ = self.attention(x, x, x)
+        x = self.norm1(x + self.dropout(attn_output))
+
+        ffn_output = self.ffn(x)
+        x = self.norm2(x + self.dropout(ffn_output))
+
+        x = x.permute(0, 2, 1)  # [B, L, C] -> [B, C, L]
+        return x
+
+# Downsampling block using ResNet-style skip connections
+class Block(nn.Module):
+    def __init__(self, inplanes, planes, stride=1, downsample=None, bn=False):
+        super(Block, self).__init__()
+        self.bn = bn
+        self.conv1 = conv1d_5(inplanes, planes, stride)
+        self.bn1 = nn.BatchNorm1d(planes)
+        self.relu = nn.ReLU(inplace=False)
+        self.conv2 = conv1d_5(planes, planes)
+        self.bn2 = nn.BatchNorm1d(planes)
+        self.downsample = downsample
+
+    def forward(self, x):
+        residual = x
+        out = self.conv1(x)
+        if self.bn:
+            out = self.bn1(out)
+        out = self.relu(out)
+
+        out = self.conv2(out)
+        if self.bn:
+            out = self.bn2(out)
+        out = self.relu(out)
+
+        if self.downsample is not None:
+            residual = self.downsample(x)
+
+        out = out + residual
+        out = self.relu(out)
+        return out
+
+# Upsampling block
+class Decoder_block(nn.Module):
+    def __init__(self, inplanes, outplanes, kernel_size=5, stride=5):
+        super(Decoder_block, self).__init__()
+        self.upsample = nn.ConvTranspose1d(inplanes, outplanes,
+                                           kernel_size=kernel_size, stride=stride, bias=False)
+        self.conv1 = conv1d_5(inplanes, outplanes)
+        self.relu = nn.ReLU(inplace=False)
+        self.conv2 = conv1d_5(outplanes, outplanes)
+
+    def forward(self, x1, x2):
+        # x1: coarse features to upsample; x2: skip connection from the encoder.
+        # After upsampling, x1 and x2 both carry `outplanes` channels, so the
+        # concatenation has `inplanes` channels again (inplanes == 2 * outplanes
+        # in the way Model uses this block).
+        x1 = self.upsample(x1)
+        out = torch.cat((x1, x2), dim=1)
+        out = self.conv1(out)
+        out = self.relu(out)
+        out = self.conv2(out)
+        out = self.relu(out)
+        return out
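+
+# A small helper for reasoning about temporal lengths (an illustrative sketch;
+# nothing in the model calls it). With kernel_size=5 and padding=2, conv1d_5
+# maps a length L to floor((L - 1) / stride) + 1, so the default encoder
+# strides (5, 5, 5, 4) take 1000 -> 200 -> 40 -> 8 -> 2, and each decoder
+# ConvTranspose1d with kernel_size == stride inverts its step exactly:
+# (L - 1) * stride + kernel_size.
+def conv1d_5_output_length(length, stride=1):
+    return (length - 1) // stride + 1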
+
+# Main model: a 1D U-Net-style encoder/decoder with self-attention at the bottleneck
+class Model(nn.Module):
+    def __init__(self, inplanes=1, outplanes=2, layers=[2, 2, 2, 2]):
+        super(Model, self).__init__()
+        self.inplanes = inplanes
+        self.outplanes = outplanes
+        self.encoder1 = self._make_encoder(Block, 32, layers[0], 5)
+        self.encoder2 = self._make_encoder(Block, 64, layers[1], 5)
+        self.encoder3 = self._make_encoder(Block, 128, layers[2], 5)
+        self.encoder4 = self._make_encoder(Block, 256, layers[3], 4)
+
+        # Self-attention layer between encoder and decoder
+        self.self_attention = TransformerLayer(embed_dim=256, num_heads=8, ff_dim=512)
+
+        self.decoder3 = Decoder_block(256, 128, stride=4, kernel_size=4)
+        self.decoder2 = Decoder_block(128, 64)
+        self.decoder1 = Decoder_block(64, 32)
+        # Final transposed convolution back to the input length (kernel_size == stride == 5)
+        self.conv1x1 = nn.ConvTranspose1d(32, outplanes, kernel_size=5, stride=5, bias=False)
+
+    def _make_encoder(self, block, planes, blocks, stride=1):
+        downsample = None
+        if self.inplanes != planes or stride != 1:
+            downsample = nn.Conv1d(self.inplanes, planes, kernel_size=1, stride=stride, bias=False)
+        layers = [block(self.inplanes, planes, stride, downsample)]
+        self.inplanes = planes
+        for _ in range(1, blocks):
+            layers.append(block(self.inplanes, planes))
+        return nn.Sequential(*layers)
+
+    def forward(self, x):
+        down1 = self.encoder1(x)
+        down2 = self.encoder2(down1)
+        down3 = self.encoder3(down2)
+        down4 = self.encoder4(down3)
+
+        # Apply self-attention at the bottleneck
+        attention_out = self.self_attention(down4)
+
+        up3 = self.decoder3(attention_out, down3)
+        up2 = self.decoder2(up3, down2)
+        up1 = self.decoder1(up2, down1)
+        out = self.conv1x1(up1)
+        return out
+
+# Smoke test to verify input/output shape compatibility
+if __name__ == "__main__":
+    model = Model(inplanes=1, outplanes=1, layers=[3, 3, 3, 3])
+    model.eval()
+    image = torch.randn(1, 1, 1000)
+    with torch.no_grad():
+        output = model(image)
+    print(output.size())
+
+
diff --git a/Network/MyDataset.py b/Network/MyDataset.py
new file mode 100644
index 0000000..2e129a7
--- /dev/null
+++ b/Network/MyDataset.py
@@ -0,0 +1,171 @@
+import os
+import sys
+import torch
+import numpy as np
+import matplotlib.pyplot as plt
+from torch.utils.data import Dataset
+from scipy.ndimage import zoom, gaussian_filter1d
+
+# Add parent directory to path for config import
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+from config import Network_train_Config as cfg
+
+data_length = cfg.data_length
+
+def convolve_signals(signal1, signal2):
+    """
+    Perform cyclic convolution using FFT. Output length is max(len(signal1), len(signal2)).
+    """
+    len_signal = max(len(signal1), len(signal2))
+    return np.fft.ifft(np.fft.fft(signal1, len_signal) * np.fft.fft(signal2, len_signal)).real
+
+
+def apply_exponential_gain(data, gain_factor):
+    """
+    Apply an exponential gain to a signal.
+    """
+    data = np.asarray(data)
+    indices = np.arange(len(data))
+    gain = np.exp(gain_factor * indices)
+    return data * gain
+
+
+def shift_data_to_end(data, n):
+    """
+    Shift the first n elements of a 1D array to the end.
+    """
+    if n < 0 or n > len(data):
+        raise ValueError("Shift length n must be within data length range.")
+    return np.concatenate((data[n:], data[:n]))
+
+
+def shift_data_to_front(data, n):
+    """
+    Shift the last n elements of a 1D array to the front.
+    """
+    if n < 0 or n > len(data):
+        raise ValueError("Shift length n must be within data length range.")
+    return np.concatenate((data[-n:], data[:-n]))
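+
+
+# Quick sanity checks for the helpers above (illustrative values):
+#   shift_data_to_end([1, 2, 3, 4], 1)   -> [2, 3, 4, 1]
+#   shift_data_to_front([1, 2, 3, 4], 1) -> [4, 1, 2, 3]
+# convolve_signals is cyclic, so convolving with a unit impulse at index 0
+# returns the other signal unchanged:
+#   convolve_signals([1, 0, 0, 0], [1, 2, 3, 4]) -> [1, 2, 3, 4]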
+
+
+class MyDataset(Dataset):
+    def __init__(self, data_file, label_file, impulse_field_file, impulse_sim_file, mode='train',
+                 check=False, noise_coff=cfg.noise_coff, initial_params=None):
+        super(MyDataset, self).__init__()
+        self.mode = mode
+        self.check = check
+        self.noise_coff = noise_coff
+
+        # Load the CSVs and drop the first row.
+        self.data = np.delete(np.loadtxt(data_file, delimiter=","), [0], axis=0)
+        self.labels = np.delete(np.loadtxt(label_file, delimiter=","), [0], axis=0)
+
+        if self.mode in ['train', 'apply']:
+            self.impulse_field = np.loadtxt(impulse_field_file, delimiter=",")
+            self.impulse_sim = np.loadtxt(impulse_sim_file, delimiter=",")
+        else:
+            raise ValueError("Mode must be either 'train' or 'apply'")
+
+        if self.mode == 'apply':
+            self.initial_model = self.generate_initial_model(*initial_params, total_length=data_length)
+
+        # Transpose so that each row is one trace.
+        self.data = self.data.T
+        self.labels = self.labels.T
+
+    def __len__(self):
+        return self.data.shape[0]
+
+    def __getitem__(self, index):
+        data_raw = self.data[index]
+        label_data = self.labels[index]
+        impulse_field = self.impulse_field
+        impulse_sim = self.impulse_sim
+
+        # Resample everything to a common length.
+        data_raw = zoom(data_raw, data_length / len(data_raw))
+        label_data = zoom(label_data, data_length / len(label_data))
+        impulse_field = zoom(impulse_field, data_length / len(impulse_field))
+        impulse_sim = zoom(impulse_sim, data_length / len(impulse_sim))
+
+        if self.mode == 'train':
+            # gain_factor=0.0 leaves the trace unchanged.
+            data_gained = apply_exponential_gain(data_raw, 0.00)
+            data_noise1 = np.random.normal(0, self.noise_coff * np.max(abs(data_gained)), size=data_gained.shape)
+            data_noise2 = np.random.normal(0, self.noise_coff * np.max(abs(data_gained)), size=data_gained.shape)
+
+            data_noise1 = convolve_signals(impulse_field, data_noise1)
+            data_noise1 = convolve_signals(impulse_sim, data_noise1)
+
+            data_noise2 = convolve_signals(impulse_sim, data_noise2)
+
+            data_gained = convolve_signals(impulse_field, data_gained)
+            data_gained = shift_data_to_end(data_gained, cfg.shift_distance)
+
+            data_noised = data_gained + self.noise_coff * data_noise1 + self.noise_coff * data_noise2
+            data_data = data_noised
+
+        elif self.mode == 'apply':
+            data_meta = data_raw
+            data_data = convolve_signals(impulse_sim, data_meta)
+            data_data = shift_data_to_end(data_data, cfg.shift_distance)
+            # Normalization to the final range happens below for both modes.
+
+        # Construct initial model
+        if self.mode == 'train':
+            initial_model = np.full(data_length, label_data[1])
+        elif self.mode == 'apply':
+            initial_model = self.initial_model
+        initial_model = zoom(initial_model, data_length / len(initial_model))
+        initial_model = gaussian_filter1d(initial_model, sigma=data_length / 5)
+
+        # Normalize the trace to [-0.5, 0.5].
+        data_data = data_data / np.max(abs(data_data)) * 0.5
+
+        if self.check:
+            if self.mode == 'train':
+                titles = [
+                    ('label_data', label_data),
+                    ('initial_model', initial_model),
+                    ('data_raw', data_raw),
+                    ('data_noise1', data_noise1),
+                    ('data_noise2', data_noise2),
+                    ('impulse_field', impulse_field),
+                    ('impulse_sim', impulse_sim),
+                    ('data_without_noise', data_gained),
+                    ('data_noised', data_data),
+                ]
+            else:
+                # The noise terms and data_gained only exist in training mode.
+                titles = [
+                    ('label_data', label_data),
+                    ('initial_model', initial_model),
+                    ('data_raw', data_raw),
+                    ('impulse_field', impulse_field),
+                    ('impulse_sim', impulse_sim),
+                    ('data_noised', data_data),
+                ]
+            plt.figure(figsize=(10, 7))
+            for i, (title, signal) in enumerate(titles):
+                plt.subplot(len(titles), 1, i + 1)
+                plt.plot(signal, label=title, color='blue')
+                plt.grid(alpha=0.3)
+                plt.legend()
+            plt.show()
+
+        # Convert to PyTorch tensors with a channel dimension.
+        data_data = torch.from_numpy(data_data).float().unsqueeze(0)
+        label_data = torch.from_numpy(label_data).float().unsqueeze(0)
+        initial_model = torch.from_numpy(initial_model).float().unsqueeze(0)
+
+        # Shift the trace to [0, 1]; scale the models by the maximum permittivity.
+        data_data = data_data + 0.5
+        label_data = label_data / cfg.max_permittivity
+        initial_model = initial_model / cfg.max_permittivity
+
+        # Two input channels: the processed trace and the smoothed initial model.
+        input_data = torch.cat((data_data, initial_model), dim=0)
+
+        return input_data, label_data
+
+    def generate_initial_model(self, epsilon, thickness, total_length=data_length):
+        """
+        Generate an initial layered model.
+
+        Parameters:
+        - epsilon: List of permittivity values for each layer.
+        - thickness: Corresponding layer thicknesses.
+        - total_length: Total model length.
+
+        Returns:
+        - A 1D numpy array of length total_length.
+        """
+        layer_points = (np.array(thickness) / np.sum(thickness) * total_length).astype(int)
+        diff = total_length - np.sum(layer_points)
+        layer_points[0] += diff  # Absorb the rounding difference in the first layer
+
+        initial_model = np.concatenate([np.full(points, eps) for eps, points in zip(epsilon, layer_points)])
+        return initial_model
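+
+# Illustrative example (called on a dataset instance in 'apply' mode):
+# generate_initial_model([4, 9], [1, 3], total_length=1000) lays out 250
+# points of permittivity 4 followed by 750 points of permittivity 9, since
+# layer lengths are proportional to the given thicknesses.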
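+
+
+# Minimal smoke test, mirroring the one in Model.py. This is a sketch only:
+# the CSV file names below are placeholders (assumptions), not files shipped
+# with this repository; point them at real exports before running.
+if __name__ == "__main__":
+    dataset = MyDataset(
+        data_file="data.csv",
+        label_file="labels.csv",
+        impulse_field_file="impulse_field.csv",
+        impulse_sim_file="impulse_sim.csv",
+        mode="train",
+    )
+    input_data, label_data = dataset[0]
+    # Expect [2, data_length] for the input (trace + initial model) and
+    # [1, data_length] for the label.
+    print(input_data.size(), label_data.size())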