diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..452386e
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,11 @@
+repos:
+  - repo: https://github.com/PyCQA/isort
+    rev: 5.12.0
+    hooks:
+      - id: isort
+
+  - repo: https://github.com/psf/black
+    rev: 23.7.0
+    hooks:
+      - id: black
+
diff --git a/data/download.py b/data/download.py
index 7266fd0..eb25723 100644
--- a/data/download.py
+++ b/data/download.py
@@ -1,8 +1,9 @@
 # Download data from Cook Inlet DAS Experiment
 # Yiyu Ni (niyiyu@uw.edu)
 
-import pandas as pd
 import os
+
+import pandas as pd
 import wget
 
 # download data for TERRA?
@@ -34,8 +35,8 @@
 
 # download data for each event
 for idx, i in df.iterrows():
-    date = i['date']
-    eid = i['IRIS ID']
+    date = i["date"]
+    eid = i["IRIS ID"]
     print("=============================================")
     print(f"INFO: processing event {eid} on {date}.")
     if TERRA:
diff --git a/scripts/datasets.py b/scripts/datasets.py
index ae5e8b0..14024ef 100644
--- a/scripts/datasets.py
+++ b/scripts/datasets.py
@@ -1,10 +1,12 @@
 import numpy as np
 import torch
 
+
 class DASDataset(torch.utils.data.Dataset):
-    '''
+    """
     torch dataset class for SHallow REcurrent Network.
-    '''
+    """
+
     def __init__(self, inputs, outputs):
         if isinstance(inputs, torch.Tensor):
             self.inputs = inputs
@@ -21,11 +23,13 @@
     def __getitem__(self, index):
         Y = self.outputs[index, :]
         return X, Y
-    
+
+
 class DASDataset2DOnTheFly(torch.utils.data.Dataset):
-    '''
+    """
     torch dataset class for Random Fourier Feature Network.
-    '''
+    """
+
     def __init__(self, B, T, X, outputs):
         self.B = torch.Tensor(B)
         self.T = torch.Tensor(T)
@@ -38,21 +42,25 @@ def __len__(self):
         return len(self.outputs)
 
     def __getitem__(self, index):
         _t = self.T[index]
         _x = self.X[index]
-        _Bv = torch.matmul(torch.Tensor(torch.concat([_t, _x], axis = -1)), self.B.T)
-        X = torch.cat([torch.cos(2*torch.pi*_Bv), torch.sin(2*torch.pi*_Bv)], axis = -1)
+        _Bv = torch.matmul(torch.Tensor(torch.concat([_t, _x], axis=-1)), self.B.T)
+        X = torch.cat(
+            [torch.cos(2 * torch.pi * _Bv), torch.sin(2 * torch.pi * _Bv)], axis=-1
+        )
         Y = self.outputs[index, :]
         return X, Y
 
+
 class DASDataset2D(torch.utils.data.Dataset):
-    '''
+    """
     torch dataset class for SIREN model.
- ''' + """ + def __init__(self, T, X, outputs): self.T = torch.Tensor(T) self.X = torch.Tensor(X) self.outputs = torch.Tensor(outputs) - + def __len__(self): return len(self.outputs) @@ -62,4 +70,4 @@ def __getitem__(self, index): X = self.X[index] Y = self.outputs[index] - return torch.concat([T, X], axis=-1), Y \ No newline at end of file + return torch.concat([T, X], axis=-1), Y diff --git a/scripts/models.py b/scripts/models.py index cfdef67..9eed8a4 100644 --- a/scripts/models.py +++ b/scripts/models.py @@ -1,21 +1,25 @@ import torch + class SHRED(torch.nn.Module): def __init__(self, input_size, hidden_size, output_size, num_layers): super().__init__() - self.lstm = torch.nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, dropout=0.1) - self.sdn1 = torch.nn.Linear(hidden_size, output_size//2) - self.sdn3 = torch.nn.Linear(output_size//2, output_size) + self.lstm = torch.nn.LSTM( + input_size, hidden_size, num_layers, batch_first=True, dropout=0.1 + ) + self.sdn1 = torch.nn.Linear(hidden_size, output_size // 2) + self.sdn3 = torch.nn.Linear(output_size // 2, output_size) self.relu = torch.nn.ReLU() def forward(self, x): - x = self.lstm(x)[1][0][-1] # should be -1 + x = self.lstm(x)[1][0][-1] # should be -1 x = self.relu(self.sdn1(x)) x = self.sdn3(x) - return - + return + + class RandomFourierFeature(torch.nn.Module): - def __init__(self, nfeature, n_layers, n_outputs = 1): + def __init__(self, nfeature, n_layers, n_outputs=1): super().__init__() self.n_layers = n_layers self.n_outputs = n_outputs @@ -33,9 +37,10 @@ def forward(self, x): for i in range(self.n_layers): layer = getattr(self, f"ln{i+1}") x = self.relu(layer(x)) - x = 2*self.sigmoid(self.outputs(x))-1 + x = 2 * self.sigmoid(self.outputs(x)) - 1 return x + class SIREN(torch.nn.Module): def __init__(self, n_input, n_output, n_layers, n_units, omega): super().__init__() @@ -62,6 +67,6 @@ def forward(self, x): layer = getattr(self, f"ln{i+1}") bias = getattr(self, f"ln{i+1}_bias") x = torch.sin(self.omega * layer(x) + bias) - x = 2*self.sigmoid(self.outputs(x) + self.outputs_bias) - 1 + x = 2 * self.sigmoid(self.outputs(x) + self.outputs_bias) - 1 - return x \ No newline at end of file + return x diff --git a/scripts/rffn/run.py b/scripts/rffn/run.py index fc0ce2d..9e4140d 100644 --- a/scripts/rffn/run.py +++ b/scripts/rffn/run.py @@ -1,7 +1,8 @@ +import argparse +import time + import numpy as np from tqdm import tqdm -import time -import argparse parser = argparse.ArgumentParser(description="Fitting GCI DAS data") parser.add_argument("-f", "--file", required=True) @@ -16,12 +17,14 @@ gpu_id = args.gpu import os -os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id) +os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id) + +import h5py import torch -from torch.utils.data import DataLoader from torch.nn import MSELoss -import h5py +from torch.utils.data import DataLoader + class DASDatasetOnTheFly(torch.utils.data.Dataset): def __init__(self, B, T, X, outputs): @@ -36,13 +39,16 @@ def __len__(self): def __getitem__(self, index): _t = self.T[index] _x = self.X[index] - _Bv = torch.matmul(torch.Tensor(torch.concat([_t, _x], axis = -1)), self.B.T) - X = torch.cat([torch.cos(2*torch.pi*_Bv), torch.sin(2*torch.pi*_Bv)], axis = -1) + _Bv = torch.matmul(torch.Tensor(torch.concat([_t, _x], axis=-1)), self.B.T) + X = torch.cat( + [torch.cos(2 * torch.pi * _Bv), torch.sin(2 * torch.pi * _Bv)], axis=-1 + ) y = self.outputs[index, :] return X, y - + + class RandomFourierFeature(torch.nn.Module): - def __init__(self, nfeature, n_layers, 
+    def __init__(self, nfeature, n_layers, n_outputs=1):
         super().__init__()
         self.n_layers = n_layers
         self.n_outputs = n_outputs
@@ -60,13 +66,16 @@ def forward(self, x):
         for i in range(self.n_layers):
             layer = getattr(self, f"ln{i+1}")
             x = self.relu(layer(x))
-        x = 2*self.sigmoid(self.outputs(x))-1
+        x = 2 * self.sigmoid(self.outputs(x)) - 1
         return x
-
-print(f"====REPORT: working on {filetime}, channel {channel}-{channel+1000}, GPU {gpu_id}====")
-df = h5py.File(filename, 'r')
-data = df['/Acquisition/Raw[0]/RawData'][:, channel:channel+1000].T
+
+print(
+    f"====REPORT: working on {filetime}, channel {channel}-{channel+1000}, GPU {gpu_id}===="
+)
+
+df = h5py.File(filename, "r")
+data = df["/Acquisition/Raw[0]/RawData"][:, channel : channel + 1000].T
 df.close()
 
 nc, ns = data.shape
@@ -77,18 +86,21 @@ def forward(self, x):
 data = torch.Tensor(data)
 in_x, in_t = torch.meshgrid(torch.arange(nc), torch.arange(ns))
-T = in_t/(ns-1); X = in_x/(nc-1)
+T = in_t / (ns - 1)
+X = in_x / (nc - 1)
 
-device = torch.device('cuda')
+device = torch.device("cuda")
 
 n_feature = 196
-B = torch.Tensor(np.random.normal(scale = 20, size = (n_feature, 2)))
+B = torch.Tensor(np.random.normal(scale=20, size=(n_feature, 2)))
 
-datasetonthefly = DASDatasetOnTheFly(B.to(device),
-                                     T.reshape([-1, 1]).to(device),
-                                     X.reshape([-1, 1]).to(device),
-                                     data.reshape([-1, 1]).to(device))
+datasetonthefly = DASDatasetOnTheFly(
+    B.to(device),
+    T.reshape([-1, 1]).to(device),
+    X.reshape([-1, 1]).to(device),
+    data.reshape([-1, 1]).to(device),
+)
 
 data_loader = DataLoader(datasetonthefly, batch_size=4096, shuffle=True, num_workers=0)
@@ -98,9 +110,9 @@ def forward(self, x):
     i.weight.data.normal_(mean=0.0, std=0.1)
     i.bias.data.normal_(mean=0.0, std=0.1)
 
-model.to(device);
+model.to(device)
 
-optimizer = torch.optim.Adam(model.parameters(), lr = 3e-4)
+optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
 loss_fn = MSELoss()
 nepoch = 8
 train_loss_log = []
@@ -108,7 +120,7 @@ def forward(self, x):
 for t in range(nepoch):
     model.train()
     train_loss = 0
-    
+
     for batch_id, batch in enumerate(data_loader):
         pred = model(batch[0].cuda())
         loss = loss_fn(pred, batch[1].cuda())
@@ -116,13 +128,13 @@ def forward(self, x):
         loss.backward()
         optimizer.step()
         train_loss += loss.item()
-    
+
     train_loss /= len(data_loader)
-    
+
     train_loss_log.append(train_loss)
-    print(f"{t}: train loss: %.6f" % train_loss) 
+    print(f"{t}: train loss: %.6f" % train_loss)
     if train_loss < 1e-4:
         break
 
 print(time.time() - t0)
-torch.save(model.state_dict(), f"./weights/rff_{filetime}_{channel}-{channel+1000}.pt")
\ No newline at end of file
+torch.save(model.state_dict(), f"./weights/rff_{filetime}_{channel}-{channel+1000}.pt")
diff --git a/scripts/rffn/submit.py b/scripts/rffn/submit.py
index b7d9299..3a557cd 100644
--- a/scripts/rffn/submit.py
+++ b/scripts/rffn/submit.py
@@ -1,26 +1,31 @@
 from datetime import datetime
-
 from mpi4py import MPI
+
 comm = MPI.COMM_WORLD
 rank = comm.Get_rank()
 size = comm.Get_size()
 
-import os
 import glob
+import os
+
 import numpy as np
 
-with open("./log.log", 'a') as f:
+with open("./log.log", "a") as f:
     f.write(f"{rank}: ==============={datetime.now()}===========\n")
 
-flist = sorted(glob.glob("/home/niyiyu/Research/DAS-NIR/datasets/KKFL-S_FiberA_2p5Hz/decimator3_2023-07-16_*_UTC.h5"))
+flist = sorted(
+    glob.glob(
+        "/home/niyiyu/Research/DAS-NIR/datasets/KKFL-S_FiberA_2p5Hz/decimator3_2023-07-16_*_UTC.h5"
+    )
+)
 
 for f in flist:
-    for ic, c in enumerate(np.linspace(1000, 6000, 6).astype('int')):
+    for ic, c in enumerate(np.linspace(1000, 6000, 6).astype("int")):
         if ic % size == rank:
             gpuid = rank % 4
-            command = f"/home/niyiyu/anaconda3/envs/dasnir/bin/python /home/niyiyu/Research/DAS-NIR/gci-summary/tests/rff/run.py -f {f} -c {c} -g 3"
+            # use the per-rank GPU id instead of a hard-coded device
+            command = f"/home/niyiyu/anaconda3/envs/dasnir/bin/python /home/niyiyu/Research/DAS-NIR/gci-summary/tests/rff/run.py -f {f} -c {c} -g {gpuid}"
             os.system(command)
 
-with open("./log.log", 'w') as f:
-    f.write(f"{rank}: ==============={datetime.now()}===========\n")
\ No newline at end of file
+with open("./log.log", "a") as f:
+    f.write(f"{rank}: ==============={datetime.now()}===========\n")
diff --git a/scripts/siren/run.py b/scripts/siren/run.py
index da45542..7700eb2 100644
--- a/scripts/siren/run.py
+++ b/scripts/siren/run.py
@@ -1,7 +1,8 @@
+import argparse
+import time
+
 import numpy as np
 from tqdm import tqdm
-import time
-import argparse
 
 parser = argparse.ArgumentParser(description="Fitting GCI DAS data")
 parser.add_argument("-f", "--file", required=True)
@@ -16,33 +17,36 @@
 gpu_id = args.gpu
 
 import os
-os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
+
+os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
+
+import h5py
 import torch
-from torch.utils.data import DataLoader
 from torch.nn import MSELoss
-import h5py
+from torch.utils.data import DataLoader
+
 
 class DASDataset(torch.utils.data.Dataset):
     def __init__(self, T, X, outputs):
-        'Initialization'
+        "Initialization"
        self.T = torch.Tensor(T)
         self.X = torch.Tensor(X)
         self.outputs = torch.Tensor(outputs)
-    
+
     def __len__(self):
-        'Denotes the total number of samples'
+        "Denotes the total number of samples"
         return len(self.outputs)
 
     def __getitem__(self, index):
-        'Generates one sample of data'
+        "Generates one sample of data"
         # Select sample
         t = self.T[index]
         x = self.X[index]
         y = self.outputs[index]
 
         return torch.concat([t, x], axis=-1), y
-    
+
+
 class SIREN(torch.nn.Module):
     def __init__(self, n_input, n_output, n_layers, n_units, omega):
         super().__init__()
@@ -69,14 +73,17 @@ def forward(self, x):
             layer = getattr(self, f"ln{i+1}")
             bias = getattr(self, f"ln{i+1}_bias")
             x = torch.sin(self.omega * layer(x) + bias)
-        x = 2*self.sigmoid(self.outputs(x) + self.outputs_bias) - 1
+        x = 2 * self.sigmoid(self.outputs(x) + self.outputs_bias) - 1
         return x
-
-print(f"====REPORT: working on {filetime}, channel {channel}-{channel+1000}, GPU {gpu_id}====")
-df = h5py.File(filename, 'r')
-data = df['/Acquisition/Raw[0]/RawData'][:, channel:channel+1000].T
+
+print(
+    f"====REPORT: working on {filetime}, channel {channel}-{channel+1000}, GPU {gpu_id}===="
+)
+
+df = h5py.File(filename, "r")
+data = df["/Acquisition/Raw[0]/RawData"][:, channel : channel + 1000].T
 df.close()
 
 nc, ns = data.shape
@@ -87,42 +94,45 @@ def forward(self, x):
 data = torch.Tensor(data)
 in_x, in_t = torch.meshgrid(torch.arange(nc), torch.arange(ns))
-T = in_t/(ns-1); X = in_x/(nc-1)
+T = in_t / (ns - 1)
+X = in_x / (nc - 1)
 
-device = torch.device('cuda')
+device = torch.device("cuda")
 
-dataset = DASDataset(T.reshape([-1, 1]).to(device),
-                     X.reshape([-1, 1]).to(device),
-                     data.reshape([-1, 1]).to(device))
+dataset = DASDataset(
+    T.reshape([-1, 1]).to(device),
+    X.reshape([-1, 1]).to(device),
+    data.reshape([-1, 1]).to(device),
+)
 
-data_loader = DataLoader(dataset, batch_size=1024*16, shuffle=True, num_workers=0)
+data_loader = DataLoader(dataset, batch_size=1024 * 16, shuffle=True, num_workers=0)
 
 n_units = 128
 n_layers = 20
 n_input = 2
 n_output = 1
 omega = 30
-model = SIREN(n_input = n_input,
-              n_output = n_output,
-              n_layers = n_layers,
-              n_units = n_units,
-              omega = omega)
+model = SIREN(
+    n_input=n_input, n_output=n_output, n_layers=n_layers, n_units=n_units, omega=omega
+)
 
 for name, mod in model.named_parameters():
-    if "inputs" in name: # for input layer
-        if 'bias' in name:
-            mod.data.uniform_(-1/np.sqrt(n_input), 1/np.sqrt(n_input))
-        elif 'weight' in name:
-            mod.data.uniform_(-1/2, 1/2)
-    else: # for hidden layer
-        if 'bias' in name:
-            mod.data.uniform_(-1/np.sqrt(n_units), 1/np.sqrt(n_units))
-        elif 'weight' in name:
-            mod.data.uniform_(-np.sqrt(6/n_units)/omega, np.sqrt(6/n_units)/omega)
-
-model.to(device);
-
-optimizer = torch.optim.Adam(model.parameters(), lr = 1e-5)
+    if "inputs" in name:  # for input layer
+        if "bias" in name:
+            mod.data.uniform_(-1 / np.sqrt(n_input), 1 / np.sqrt(n_input))
+        elif "weight" in name:
+            mod.data.uniform_(-1 / 2, 1 / 2)
+    else:  # for hidden layer
+        if "bias" in name:
+            mod.data.uniform_(-1 / np.sqrt(n_units), 1 / np.sqrt(n_units))
+        elif "weight" in name:
+            mod.data.uniform_(
+                -np.sqrt(6 / n_units) / omega, np.sqrt(6 / n_units) / omega
+            )
+
+model.to(device)
+
+optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
 loss_fn = MSELoss()
 
 nepoch = 8
@@ -131,7 +141,7 @@ def forward(self, x):
 for t in range(nepoch):
     model.train()
     train_loss = 0
-    
+
     for batch_id, batch in enumerate(data_loader):
         pred = model(batch[0].cuda())
         loss = loss_fn(pred, batch[1].cuda())
@@ -139,13 +149,15 @@ def forward(self, x):
         loss.backward()
         optimizer.step()
         train_loss += loss.item()
-    
+
     train_loss /= len(data_loader)
-    
+
     train_loss_log.append(train_loss)
-    print(f"{t}: train loss: %.6f" % train_loss) 
+    print(f"{t}: train loss: %.6f" % train_loss)
     if train_loss < 1e-4:
         break
 
 print(time.time() - t0)
-torch.save(model.state_dict(), f"./weights/siren_{filetime}_{channel}-{channel+1000}.pt")
\ No newline at end of file
+torch.save(
+    model.state_dict(), f"./weights/siren_{filetime}_{channel}-{channel+1000}.pt"
+)
diff --git a/scripts/siren/submit.py b/scripts/siren/submit.py
index ea2f66e..dd123f4 100644
--- a/scripts/siren/submit.py
+++ b/scripts/siren/submit.py
@@ -1,26 +1,31 @@
 from datetime import datetime
-
 from mpi4py import MPI
+
 comm = MPI.COMM_WORLD
 rank = comm.Get_rank()
 size = comm.Get_size()
 
-import os
 import glob
+import os
+
 import numpy as np
 
-with open("./log.log", 'a') as f:
+with open("./log.log", "a") as f:
     f.write(f"{rank}: ==============={datetime.now()}===========\n")
 
-flist = sorted(glob.glob("/home/niyiyu/Research/DAS-NIR/datasets/KKFL-S_FiberA_2p5Hz/decimator3_2023-07-16_*_UTC.h5"))
+flist = sorted(
+    glob.glob(
+        "/home/niyiyu/Research/DAS-NIR/datasets/KKFL-S_FiberA_2p5Hz/decimator3_2023-07-16_*_UTC.h5"
+    )
+)
 
 for f in flist:
-    for ic, c in enumerate(np.linspace(1000, 6000, 6).astype('int')):
+    for ic, c in enumerate(np.linspace(1000, 6000, 6).astype("int")):
         if ic % size == rank:
             gpuid = rank % 4
-            command = f"/home/niyiyu/anaconda3/envs/dasnir2/bin/python /home/niyiyu/Research/DAS-NIR/gci-summary/tests/siren/run.py -f {f} -c {c} -g 1"
+            # use the per-rank GPU id instead of a hard-coded device
+            command = f"/home/niyiyu/anaconda3/envs/dasnir2/bin/python /home/niyiyu/Research/DAS-NIR/gci-summary/tests/siren/run.py -f {f} -c {c} -g {gpuid}"
             os.system(command)
 
-with open("./log.log", 'w') as f:
-    f.write(f"{rank}: ==============={datetime.now()}===========\n")
\ No newline at end of file
+with open("./log.log", "a") as f:
+    f.write(f"{rank}: ==============={datetime.now()}===========\n")
diff --git a/scripts/taup_calculate_arrival.py b/scripts/taup_calculate_arrival.py
index 6c5d967..fe8f5a1 100644
--- a/scripts/taup_calculate_arrival.py
+++ b/scripts/taup_calculate_arrival.py
@@ -1,15 +1,16 @@
-import numpy as np
-import pandas as pd
-import utm
-import time
 import glob
 import os
-import obspy
-from tqdm import tqdm
+import time
+
 import geopy.distance as geo
-from obspy.taup import TauPyModel
-from obspy.core.utcdatetime import UTCDateTime
+import numpy as np
+import obspy
+import pandas as pd
+import utm
 from joblib import Parallel, delayed
+from obspy.core.utcdatetime import UTCDateTime
+from obspy.taup import TauPyModel
+from tqdm import tqdm
 
 nterra = 8531
 nkkfls = 8531
@@ -20,56 +21,74 @@
 
 #############################################################################
 # interpolate channel location
-kkfls = pd.read_csv("../../datasets/cables/KKFLS_geom.xy",
-                    header=None, names=["lon", "lat"], delim_whitespace=True)
-terra = pd.read_csv("../../datasets/cables/TERRA_geom.xy",
-                    header=None, names=["lon", "lat"], delim_whitespace=True)
+kkfls = pd.read_csv(
+    "../../datasets/cables/KKFLS_geom.xy",
+    header=None,
+    names=["lon", "lat"],
+    delim_whitespace=True,
+)
+terra = pd.read_csv(
+    "../../datasets/cables/TERRA_geom.xy",
+    header=None,
+    names=["lon", "lat"],
+    delim_whitespace=True,
+)
 
 for idx, i in kkfls.iterrows():
-    kkfls.loc[idx, 'x'] = utm.from_latlon(i['lat'], i['lon'])[0]
-    kkfls.loc[idx, 'y'] = utm.from_latlon(i['lat'], i['lon'])[1]
+    kkfls.loc[idx, "x"] = utm.from_latlon(i["lat"], i["lon"])[0]
+    kkfls.loc[idx, "y"] = utm.from_latlon(i["lat"], i["lon"])[1]
 
 dt = 0
-kkfls.loc[0, 'dist'] = 0
-for i in range(len(kkfls)-1):
-    dx = kkfls.loc[i+1]['x'] - kkfls.loc[i]['x']
-    dy = kkfls.loc[i+1]['y'] - kkfls.loc[i]['y']
-    dt += np.sqrt(dx ** 2 + dy ** 2)
-    kkfls.loc[i+1, 'dist'] = dt
+kkfls.loc[0, "dist"] = 0
+for i in range(len(kkfls) - 1):
+    dx = kkfls.loc[i + 1]["x"] - kkfls.loc[i]["x"]
+    dy = kkfls.loc[i + 1]["y"] - kkfls.loc[i]["y"]
+    dt += np.sqrt(dx**2 + dy**2)
+    kkfls.loc[i + 1, "dist"] = dt
 
 for idx, i in terra.iterrows():
-    terra.loc[idx, 'x'] = utm.from_latlon(i['lat'], i['lon'])[0]
-    terra.loc[idx, 'y'] = utm.from_latlon(i['lat'], i['lon'])[1]
+    terra.loc[idx, "x"] = utm.from_latlon(i["lat"], i["lon"])[0]
+    terra.loc[idx, "y"] = utm.from_latlon(i["lat"], i["lon"])[1]
 
 dt = 0
-terra.loc[0, 'dist'] = 0
-for i in range(len(terra)-1):
-    dx = terra.loc[i+1]['x'] - terra.loc[i]['x']
-    dy = terra.loc[i+1]['y'] - terra.loc[i]['y']
-    dt += np.sqrt(dx ** 2 + dy ** 2)
-    terra.loc[i+1, 'dist'] = dt
+terra.loc[0, "dist"] = 0
+for i in range(len(terra) - 1):
+    dx = terra.loc[i + 1]["x"] - terra.loc[i]["x"]
+    dy = terra.loc[i + 1]["y"] - terra.loc[i]["y"]
+    dt += np.sqrt(dx**2 + dy**2)
+    terra.loc[i + 1, "dist"] = dt
+
+kkfls = kkfls[kkfls["dist"] <= nkkfls * cha_spac]
+terra = terra[terra["dist"] <= nterra * cha_spac]
 
-kkfls = kkfls[kkfls['dist'] <= nkkfls * cha_spac]
-terra = terra[terra['dist'] <= nterra * cha_spac]
+kkfls_lon = np.interp(
+    np.arange(0, nkkfls * cha_spac, cha_spac), kkfls["dist"], kkfls["lon"]
+)
+kkfls_lat = np.interp(
+    np.arange(0, nkkfls * cha_spac, cha_spac), kkfls["dist"], kkfls["lat"]
+)
 
-kkfls_lon = np.interp(np.arange(0, nkkfls*cha_spac, cha_spac), kkfls['dist'], kkfls['lon'])
-kkfls_lat = np.interp(np.arange(0, nkkfls*cha_spac, cha_spac), kkfls['dist'], kkfls['lat'])
+terra_lon = np.interp(
+    np.arange(0, nterra * cha_spac, cha_spac), terra["dist"], terra["lon"]
+)
+terra_lat = np.interp(
+    np.arange(0, nterra * cha_spac, cha_spac), terra["dist"], terra["lat"]
+)
 
-terra_lon = np.interp(np.arange(0, nterra*cha_spac, cha_spac), terra['dist'], terra['lon'])
-terra_lat = np.interp(np.arange(0, nterra*cha_spac, cha_spac), terra['dist'], terra['lat'])
+
 #############################################################################
-def calculate_ps(i, rlat, rlon, olat, lon, depth):
+def calculate_ps(i, rlat, rlon, olat, olon, depth):  # 5th argument is the origin longitude
     dist = geo.great_circle((olat, olon), (rlat[i], rlon[i]))
-    dist_deg = dist.km * 180. / (np.pi * dist.RADIUS)
-    _p = model.get_travel_times(source_depth_in_km=depth,
-                                distance_in_degree=dist_deg,
-                                phase_list=['P','p'])[0].time
-    _s = model.get_travel_times(source_depth_in_km=depth,
-                                distance_in_degree=dist_deg,
-                                phase_list=['S','s'])[0].time
+    dist_deg = dist.km * 180.0 / (np.pi * dist.RADIUS)
+    _p = model.get_travel_times(
+        source_depth_in_km=depth, distance_in_degree=dist_deg, phase_list=["P", "p"]
+    )[0].time
+    _s = model.get_travel_times(
+        source_depth_in_km=depth, distance_in_degree=dist_deg, phase_list=["S", "s"]
+    )[0].time
     return [_p, _s]
 
+
 for event in tqdm(sorted(glob.glob("../../datasets/earthquakes/*.h5"))):
     eid = event.split("/")[-1].split(".")[0]
     if os.path.exists(f"../../datasets/arrivals/{eid}.csv"):
@@ -81,19 +100,31 @@ def calculate_ps(i, rlat, rlon, olat, lon, depth):
     depth = porig.depth / 1e3
     olat = porig.latitude
     olon = porig.longitude
-    
+
     try:
-        t_kkfls = np.array(Parallel(n_jobs=40)(delayed(calculate_ps)(i, kkfls_lat, kkfls_lon, olat, olon, depth)
-                                               for i in range(nkkfls)))
-        t_terra = np.array(Parallel(n_jobs=40)(delayed(calculate_ps)(i, terra_lat, terra_lon, olat, olon, depth)
-                                               for i in range(nterra)))
-
-        df = pd.DataFrame({
-            "cable": ["KKFLS"]*nkkfls + ['TERRA']*nterra,
-            "channel_index": np.concatenate([np.arange(nkkfls), np.arange(nterra)]),
-            "t_p": np.concatenate([t_kkfls[:, 0], t_terra[:, 0]]),
-            "t_s": np.concatenate([t_kkfls[:, 1], t_terra[:, 1]]),
-        })
-        df.to_csv(f"../../datasets/arrivals/{eid}.csv", index=False, float_format="%.3f")
+        t_kkfls = np.array(
+            Parallel(n_jobs=40)(
+                delayed(calculate_ps)(i, kkfls_lat, kkfls_lon, olat, olon, depth)
+                for i in range(nkkfls)
+            )
+        )
+        t_terra = np.array(
+            Parallel(n_jobs=40)(
+                delayed(calculate_ps)(i, terra_lat, terra_lon, olat, olon, depth)
+                for i in range(nterra)
+            )
+        )
+
+        df = pd.DataFrame(
+            {
+                "cable": ["KKFLS"] * nkkfls + ["TERRA"] * nterra,
+                "channel_index": np.concatenate([np.arange(nkkfls), np.arange(nterra)]),
+                "t_p": np.concatenate([t_kkfls[:, 0], t_terra[:, 0]]),
+                "t_s": np.concatenate([t_kkfls[:, 1], t_terra[:, 1]]),
+            }
+        )
+        df.to_csv(
+            f"../../datasets/arrivals/{eid}.csv", index=False, float_format="%.3f"
+        )
     except:
         pass
diff --git a/scripts/utils.py b/scripts/utils.py
index 601b7cf..efdb08d 100644
--- a/scripts/utils.py
+++ b/scripts/utils.py
@@ -1,19 +1,23 @@
 import gc
-import torch
+
 import numpy as np
+import torch
+
 
 def get_mse(original, compressed):
     if isinstance(original, torch.Tensor):
         original = get_array(original)
     if isinstance(compressed, torch.Tensor):
         compressed = get_array(compressed)
-    
+
     return np.mean((original - compressed) ** 2)
 
+
 def get_rmse(original, compressed):
     mse = get_mse(original, compressed)
     return np.sqrt(mse)
 
+
 def get_psnr(original, compressed):
     if isinstance(original, torch.Tensor):
         original = get_array(original)
@@ -27,9 +31,11 @@ def get_psnr(original, compressed):
     psnr = 20 * np.log10(max_pixel / np.sqrt(mse))
     return psnr
 
+
 def clean_up():
     gc.collect()
     torch.cuda.empty_cache()
 
+
 def count_weights(model):
-    return sum(p.numel() for p in model.parameters() if p.requires_grad)
\ No newline at end of file
+    return sum(p.numel() for p in model.parameters() if p.requires_grad)