From 9f54788d1ee1fd2b461232c475d6a04a00d176d7 Mon Sep 17 00:00:00 2001 From: ltscamfer <156849455+ltscamfer@users.noreply.github.com> Date: Thu, 11 Apr 2024 11:54:43 -0800 Subject: [PATCH 1/8] addressed np.complex deprecation issue --- array_processing/tools/array_characterization.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/array_processing/tools/array_characterization.py b/array_processing/tools/array_characterization.py index 1cba5dc..e2b35b3 100644 --- a/array_processing/tools/array_characterization.py +++ b/array_processing/tools/array_characterization.py @@ -398,10 +398,10 @@ def quadraticEqn(a, b, c): # note np.sqrt(-1) = nan, so force complex argument if b: # std. sub-branch - q = -0.5*(b + np.sign(b) * np.sqrt(np.complex(b * b - 4 * a * c))) + q = -0.5*(b + np.sign(b) * np.sqrt(complex(b * b - 4 * a * c))) else: # b = 0 sub-branch - q = -np.sqrt(np.complex(-a * c)) + q = -np.sqrt(complex(-a * c)) # complex coefficient branch else: if np.real(np.conj(b) * np.sqrt(b * b - 4 * a * c)) >= 0: From 9b15b61ff9d12903ac849b21047aae6f22f92dea Mon Sep 17 00:00:00 2001 From: SamDelamere Date: Sat, 16 Nov 2024 19:51:03 -0900 Subject: [PATCH 2/8] Adding optional argument to refine beam by minimizing RMS between beam and constituent traces. 
--- array_processing/tools/generic.py | 215 ++++++++++++++++++++++-------- 1 file changed, 159 insertions(+), 56 deletions(-) diff --git a/array_processing/tools/generic.py b/array_processing/tools/generic.py index da42ef1..93a48f6 100644 --- a/array_processing/tools/generic.py +++ b/array_processing/tools/generic.py @@ -27,28 +27,39 @@ def array_thresh(mcthresh, az_volc, az_diff, mdccm, az, vel): # Use numpy to find where thresholds are exceeded mc_good = np.where(mdccm > mcthresh)[0] az_good = np.where((az >= az_volc - az_diff) & (az <= az_volc + az_diff))[0] - vel_good = np.where((vel >= .25) & (vel <= .45))[0] + vel_good = np.where((vel >= 0.25) & (vel <= 0.45))[0] igood = reduce(np.intersect1d, (mc_good, az_good, vel_good)) # Find find number of consecutive values exceeded. ranges = [] nconsec = [] - for k, g in groupby(enumerate(igood), lambda x: x[0]-x[1]): + for k, g in groupby(enumerate(igood), lambda x: x[0] - x[1]): group = list(map(itemgetter(1), g)) ranges.append((group[0], group[-1])) - nconsec.append(group[-1]-group[0]+1) + nconsec.append(group[-1] - group[0] + 1) if len(nconsec) > 0: consecmax = max(nconsec) else: consecmax = 0 - print('%d above trheshold, %d consecutive\n' % (len(igood), consecmax)) + print("%d above threshold, %d consecutive\n" % (len(igood), consecmax)) return igood -def beamForm(data, rij, Hz, azPhi, vel=0.340, r=None, wgt=None, refTrace=None, - M=None, Moffset=None): +def beamForm( + data, + rij, + Hz, + azPhi, + vel=0.340, + r=None, + wgt=None, + refTrace=None, + M=None, + Moffset=None, + minimizeRMS=False, +): r""" Form a "best beam" from the traces of an array. 
@@ -101,25 +112,24 @@ def beamForm(data, rij, Hz, azPhi, vel=0.340, r=None, wgt=None, refTrace=None, else: if len(wgt) != nTraces: # catch dimension mismatch between tau & wgt - raise IndexError('len(wgt) != ' + str(nTraces)) - wgt = np.array(wgt) # require array form here for later operations + raise IndexError("len(wgt) != " + str(nTraces)) + wgt = np.array(wgt) # require array form here for later operations # default refTrace is first non-zero wgt if refTrace is None: - refTrace = np.min(np.where(wgt != 0)) # requires array wgt + refTrace = np.min(np.where(wgt != 0)) # requires array wgt # default Moffset is zero for all traces if Moffset is None: Moffset = [0 for i in range(nTraces)] else: if len(Moffset) != nTraces: # catch dimension mismatch between tau & Moffset - raise IndexError('len(Moffset) != ' + str(nTraces)) + raise IndexError("len(Moffset) != " + str(nTraces)) # -- end default parsing & error checking ------------------------------- # planar (far-field) or spherical (near-field) arrival? 
if r is None: tau = tauCalcPW(vel, azPhi, rij) else: - # need to unpack & repack azPhi with care if np.isscalar(azPhi): tau = tauCalcSW(vel, [r, azPhi], rij) @@ -129,8 +139,12 @@ def beamForm(data, rij, Hz, azPhi, vel=0.340, r=None, wgt=None, refTrace=None, beam_delays = phaseAlignIdx(tau, Hz, wgt, refTrace) # apply shifts, resulting in a zero-padded array beamMatrix = phaseAlignData(data, beam_delays, wgt, refTrace, M, Moffset) + # if minimizing RMS between beam and constituent traces + if minimizeRMS: + # return beam with traces shifted to minimize RMS error + return alignTracesMinRMS(beamMatrix, wgt) # linear algrebra to perform sum & then normalize by weights - return beamMatrix@wgt / wgt.sum() + return beamMatrix @ wgt / wgt.sum() def phaseAlignData(data, delays, wgt, refTrace, M, Moffset, plotFlag=False): @@ -185,30 +199,34 @@ def phaseAlignData(data, delays, wgt, refTrace, M, Moffset, plotFlag=False): # embed shifted traces in array for k in range(nTraces): if wgt[k]: - data_align[delays[k]:delays[k]+m, k] = data[:, k] * wgt[k] + data_align[delays[k] : delays[k] + m, k] = data[:, k] * wgt[k] # truncate|| pad data_align if M >< m, centered on refTrace mp = data_align.shape[0] # new value for m if M is not None and M is not mp: - alignBounds = [delays[refTrace] + m//2 - M//2, - delays[refTrace] + m//2 + M//2] + alignBounds = [ + delays[refTrace] + m // 2 - M // 2, + delays[refTrace] + m // 2 + M // 2, + ] # trap round-off errors and force (M, nTraces) data_align - if alignBounds[1]-alignBounds[0] != M: + if alignBounds[1] - alignBounds[0] != M: alignBounds[1] += 1 - if not (alignBounds[1]-alignBounds[0])%2: + if not (alignBounds[1] - alignBounds[0]) % 2: alignBounds[0] -= 1 # -- LHS (graphically, but actually topside in array-land!) 
if alignBounds[0] < 0: # pad LHS of traces w zeros or np.nans - data_align = np.vstack((np.zeros((-alignBounds[0], nTraces)) * nanOrOne, - data_align)) + data_align = np.vstack( + (np.zeros((-alignBounds[0], nTraces)) * nanOrOne, data_align) + ) elif alignBounds[0] > 0: - data_align = data_align[alignBounds[0]:] + data_align = data_align[alignBounds[0] :] # -- RHS (graphically, but actually bottom in array-land!) if alignBounds[1] > mp: # pad RHS of traces w zeros or np.nans - data_align = np.vstack((data_align, np.zeros((alignBounds[1] - mp, - nTraces)) * nanOrOne)) + data_align = np.vstack( + (data_align, np.zeros((alignBounds[1] - mp, nTraces)) * nanOrOne) + ) elif alignBounds[1] < mp: data_align = data_align[:M] return data_align @@ -240,11 +258,13 @@ def phaseAlignIdx(tau, Hz, wgt, refTrace): nTraces = int(1 + np.sqrt(1 + 8 * len(tau))) // 2 # calculate delays (samples) relative to refTrace for each trace # -- first pass grabs delays starting with refTrace as i in ij - delayIdx = (nTraces*refTrace - refTrace*(refTrace+1)//2, - nTraces*(refTrace+1) - (refTrace+1)*(refTrace+2)//2) + delayIdx = ( + nTraces * refTrace - refTrace * (refTrace + 1) // 2, + nTraces * (refTrace + 1) - (refTrace + 1) * (refTrace + 2) // 2, + ) delays = np.hstack((0, (tau[range(delayIdx[0], delayIdx[1])] * Hz))).astype(int) # the std. 
rij list comprehension for unique inter-trace pairs - tau_ij = [(i, j) for i in range(nTraces) for j in range(i+1, nTraces)] + tau_ij = [(i, j) for i in range(nTraces) for j in range(i + 1, nTraces)] # -- second pass grabs delays with refTrace as j in ij preRefTau_idx = [k for k in range(len(tau)) if tau_ij[k][1] == refTrace] delays = np.hstack((-tau[preRefTau_idx] * Hz, delays)).astype(int) @@ -276,8 +296,7 @@ def tauCalcPW(vel, azPhi, rij): dim, nTraces = rij.shape if dim == 2: rij = np.vstack((rij, np.zeros((1, nTraces)))) - idx = [(i, j) for i in range(rij.shape[1]-1) - for j in range(i+1, rij.shape[1])] + idx = [(i, j) for i in range(rij.shape[1] - 1) for j in range(i + 1, rij.shape[1])] X = rij[:, [i[0] for i in idx]] - rij[:, [j[1] for j in idx]] if np.isscalar(azPhi): phi = 0 @@ -289,7 +308,7 @@ def tauCalcPW(vel, azPhi, rij): s = np.array([np.cos(az), np.sin(az), np.sin(phi)]) s[:-1] *= np.cos(phi) - return X.T@(s/vel) + return X.T @ (s / vel) def tauCalcSW(vel, rAzPhi, rij): @@ -318,16 +337,17 @@ def tauCalcSW(vel, rAzPhi, rij): phi = rAzPhi[2] / 180 * np.pi else: phi = 0 - idx = [(i, j) for i in range(rij.shape[1]-1) - for j in range(i+1, rij.shape[1])] + idx = [(i, j) for i in range(rij.shape[1] - 1) for j in range(i + 1, rij.shape[1])] # aw, this is so convolutedly elegant that it must be saved in a # comment for posterity!, but the line below it is "simpler" # az = -( (rAzPhi[1]/180*pi - 2*pi)%(2*pi) - pi/2 )%(2*pi) az = np.pi * (-rAzPhi[1] / 180 + 0.5) source = rAzPhi[0] * np.array([np.cos(az), np.sin(az), np.sin(phi)]) source[:-1] *= np.cos(phi) - tau2sensor = np.linalg.norm(rij - np.tile(source, nTraces).reshape(nTraces, 3).T, - 2, axis=0)/vel + tau2sensor = ( + np.linalg.norm(rij - np.tile(source, nTraces).reshape(nTraces, 3).T, 2, axis=0) + / vel + ) return tau2sensor[[j[1] for j in idx]] - tau2sensor[[i[0] for i in idx]] @@ -359,10 +379,11 @@ def tauCalcSWxy(vel, xy, rij): else: xy0 = [] source = np.hstack((xy, xy0)) - idx = [(i, j) for i in 
range(rij.shape[1]-1) - for j in range(i+1, rij.shape[1])] - tau2sensor = np.linalg.norm(rij - np.tile(source, nTraces).reshape(nTraces, 3).T, - 2, axis=0)/vel + idx = [(i, j) for i in range(rij.shape[1] - 1) for j in range(i + 1, rij.shape[1])] + tau2sensor = ( + np.linalg.norm(rij - np.tile(source, nTraces).reshape(nTraces, 3).T, 2, axis=0) + / vel + ) return tau2sensor[[j[1] for j in idx]] - tau2sensor[[i[0] for i in idx]] @@ -398,36 +419,37 @@ def randc(N, beta=0.0): c0 = np.inf # catch the case of a 1D array in python, so dimensions act like a matrix if len(N) == 1: - M = (N[0], 1) # use M[1] any time # of columns is called for + M = (N[0], 1) # use M[1] any time # of columns is called for else: M = N # phase array with size (# of unique complex Fourier components, # columns of original data) - n = int(np.floor((N[0] - 1) / 2)) # works for odd/even cases + n = int(np.floor((N[0] - 1) / 2)) # works for odd/even cases cPhase = np.random.random_sample((n, M[1])) * 2 * np.pi # Nyquist placeholders - if N[0]%2: + if N[0] % 2: # odd case: Nyquist is 1/2 freq step between highest components # so it is empty cFiller = np.empty((0,)) pFiller = np.empty((0, M[1])) else: # even case: we have a Nyquist component - cFiller = N[0]/2 + cFiller = N[0] / 2 pFiller = np.zeros((1, M[1])) # noise amplitudes are just indices (unit-offset!!) 
to be normalized # later, phases are arranged as Fourier conjugates r = np.hstack((c0, np.arange(1, n + 1), cFiller, np.arange(n, 0, -1))) - phasor = np.exp(np.vstack((np.zeros((1, M[1])), 1j * cPhase, pFiller, - -1j * np.flipud(cPhase)))) + phasor = np.exp( + np.vstack((np.zeros((1, M[1])), 1j * cPhase, pFiller, -1j * np.flipud(cPhase))) + ) # this is like my cols.m function in MATLAB r = np.tile(r, M[1]).reshape(M[1], N[0]).T ** (-beta / 2) # catch beta = 0 case here to ensure zero DC component if not beta: r[0] = 0 # inverse transform to get time series as columns, ensuring real output - X = r*phasor - r = np.real(np.fft.ifft(X, axis=0)*X.shape[0]) + X = r * phasor + r = np.real(np.fft.ifft(X, axis=0) * X.shape[0]) # renormalize r such that mean = 0 & std = 1 (MATLAB dof default used) # and return it in its original shape (i.e., a 1D vector, if req'd) @@ -513,9 +535,11 @@ def Ssmooth(S, w, n, window): for k in range(n): # f@#$ing MATLAB treats odd/even differently with mode='full' # but the behavior below now matches conv2 exactly - S = convolve2d(S, window(w).reshape(-1, 1), - mode='full')[w//2:-w//2+1, :] + S = convolve2d(S, window(w).reshape(-1, 1), mode="full")[ + w // 2 : -w // 2 + 1, : + ] return S + def triang(N): # for historical reasons, the default window shape return np.bartlett(N + 2)[1:-1] @@ -525,8 +549,8 @@ def triang(N): # Fourier transform of data matrix by time series columns, retain only # the diagonal & above (unique spectral components) Nx = x.shape - X = np.fft.fft(x, axis=0)/Nx[0] - X = X[:N//2+1, :] + X = np.fft.fft(x, axis=0) / Nx[0] + X = X[: N // 2 + 1, :] # form spectral matrix stack in reduced vector form (**significant** # speed improvement due to memory problem swith 3D tensor format -- what # was too slow in 1995 is still too slow in 2017!) 
@@ -548,20 +572,20 @@ def triang(N): # -- diagonal elements didx = [i for i in range(len(Sidx)) if Sidx[i][0] == Sidx[i][1]] # -- traceS**2 of each flapjack (really a vector here) - trS = sum(S[:, didx].real.T)**2 + trS = sum(S[:, didx].real.T) ** 2 # -- trace of each flapjack (ditto, vector), here we recognize that # trace(S@S.T) is just sum square magnitudes of all the # non-redundant components of S, doubling squares of the non-diagonal # elements - S = (S*(S.conj())*2).real + S = (S * (S.conj()) * 2).real S[:, didx] /= 2 trS2 = sum(S.T) # estimate samson-esque polarization estimate (if d==2, same as fowler) - P = (d*trS2 - trS)/((d-1)*trS) + P = (d * trS2 - trS) / ((d - 1) * trS) # a litle trick here to handle odd/even number of samples and zero-out # both the DC & Nyquist (they're both complex-contaminated due to Ssmooth) P[0] = 0 - if N%2: + if N % 2: # odd case: Nyquist is 1/2 freq step between highest components fudgeIdx = 0 else: @@ -569,10 +593,89 @@ def triang(N): fudgeIdx = 1 P[-1] = 0 # apply P as contrast agent to frequency series - X *= np.tile(P ** p, d).reshape(X.shape[::-1]).T + X *= np.tile(P**p, d).reshape(X.shape[::-1]).T # inverse transform X and ensure real output - XX = np.vstack((X[list(range(N // 2 + 1))], - X[list(range(N//2-fudgeIdx, 0, -1))].conj())) - x_psf = np.real(np.fft.ifft(XX, axis=0)*XX.shape[0]) + XX = np.vstack( + (X[list(range(N // 2 + 1))], X[list(range(N // 2 - fudgeIdx, 0, -1))].conj()) + ) + x_psf = np.real(np.fft.ifft(XX, axis=0) * XX.shape[0]) return x_psf, P + + +def alignTracesMinRMS(beamMatrix, wgt, lagMag=10): + r""" + Align traces to minimize RMS error with the beam. This function aligns traces with the estimated beam to minimize the root mean square (RMS) error between the beam and each trace. The traces are shifted by a number of samples within a specified range to minimize the RMS error. The function returns the beam formed from the adjusted traces. 
+ + Args: + beamMatrix: ``(m, n)`` array; time series with ``m`` samples from ``n`` traces as columns + wgt: Vector of relative weights of length ``n`` (0 == exclude trace) + lagMag (int): Maximum lag value for trace adjustment + + Returns: + ``(m, )`` array of summed and weighted shifted traces to form a best beam + """ + + # calculate beam + beam = beamMatrix @ wgt / wgt.sum() + # compute RMS error between the beam and each trace + rmsErrors = np.array( + [ + ( + np.sqrt(np.nanmean((beam - beamMatrix[:, i]) ** 2)) + if not np.isnan(beamMatrix[:, i]).any() + else np.nan + ) + for i in range(beamMatrix.shape[1]) + ] + ) + # sort the indices of traces based on RMS errors in ascending order (smallest error first) + sortedIndices = np.argsort(rmsErrors) + sortedBeamMatrix = beamMatrix[:, sortedIndices] + # set lag values for adjusted RMS calculation + lags = np.arange(-lagMag, lagMag + 1) + bestLags = [] + + def shiftTrace(trace, lag): + """Shift array without wrap-around, filling shifted-in positions with NaN""" + results = np.empty_like(trace) + if lag > 0: + results[:lag] = np.nan + results[lag:] = trace[:-lag] + elif lag < 0: + results[lag:] = np.nan + results[:lag] = trace[-lag:] + else: + results = trace.copy() + return results + + # adjust traces based on minimizing RMS error with the beam + for i in range(sortedBeamMatrix.shape[1]): + trace = sortedBeamMatrix[:, i] + # compute rms errors at different lags by shifting the traces + rmsErrorsLags = [] + for lag in lags: + shiftedTrace = shiftTrace(trace, lag) + if np.isnan(shiftedTrace).all(): + rmsError = np.nan + else: + rmsError = np.sqrt(np.nanmean((beam - shiftedTrace) ** 2)) + rmsErrorsLags.append(rmsError) + rmsErrorsLags = np.array(rmsErrorsLags) + minRmsError = np.nanmin(rmsErrorsLags) + bestLagValue = lags[np.nanargmin(rmsErrorsLags)] + bestLags.append(bestLagValue) + + # shift traces based on best lag values + numSamples = beamMatrix.shape[0] + rmsShiftedBeamMatrix = np.empty((numSamples, 
sortedBeamMatrix.shape[1])) + for i in range(sortedBeamMatrix.shape[1]): + trace = sortedBeamMatrix[:, i] + lag = bestLags[i] + shiftedTrace = shiftTrace(trace, lag) + rmsShiftedBeamMatrix[:, i] = shiftedTrace + + # calculate beam from adjusted traces + beam = np.nanmean(rmsShiftedBeamMatrix, axis=1) + + return beam From 9dc35c321ceda995f1614803af08b117543664ed Mon Sep 17 00:00:00 2001 From: Liam Toney Date: Mon, 12 May 2025 10:22:20 -0600 Subject: [PATCH 3/8] Add function to convert tape-and-compass survey data to `rij` array --- array_processing/algorithms/helpers.py | 46 ++++++++++++++++++++++++-- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/array_processing/algorithms/helpers.py b/array_processing/algorithms/helpers.py index d43aa24..38bb5e4 100644 --- a/array_processing/algorithms/helpers.py +++ b/array_processing/algorithms/helpers.py @@ -1,6 +1,7 @@ import numpy as np from obspy.geodetics import gps2dist_azimuth +_M_PER_KM = 1000 # [m/km] def getrij(latlist, lonlist): r""" @@ -33,8 +34,8 @@ def getrij(latlist, lonlist): # Convert azimuth in degrees to angle in radians ang = np.deg2rad((450 - az) % 360) # Convert from m to km, do trig - xnew[i] = (dist / 1000) * np.cos(ang) - ynew[i] = (dist / 1000) * np.sin(ang) + xnew[i] = (dist / _M_PER_KM) * np.cos(ang) + ynew[i] = (dist / _M_PER_KM) * np.sin(ang) # Remove the mean xnew = xnew - xnew.mean() @@ -44,3 +45,44 @@ def getrij(latlist, lonlist): rij = np.vstack((xnew, ynew)) return rij + + +def compass2rij(distances, azimuths): + """Convert tape-and-compass survey data to Cartesian :math:`x`–:math:`y` coordinates. + + The output type is the same as the :func:`getrij` function. Note that typically, + distances and azimuths will be surveyed from one of the array elements. In this + case, that array element will have distance 0 and azimuth 0. However, this function + can handle an arbitrary reference point for the distances and azimuths. 
This + function assumes that all array elements lie on the same plane. + + Args: + distances (array): Distances to each array element, in meters + azimuths (array): Azimuths to each array element, in degrees from **true** north + + Returns: + :class:`numpy.ndarray` with the first row corresponding to Cartesian + :math:`x`-coordinates and the second row corresponding to Cartesian + :math:`y`-coordinates, in units of km + """ + + # Type conversion and error checking + distances = np.array(distances) + azimuths = np.array(azimuths) + if distances.size != azimuths.size: + raise ValueError('There must be the same number of distances and azimuths') + assert (distances >= 0).all(), 'Distances cannot be negative' + assert ((azimuths >= 0) & (azimuths < 360)).all(), 'Azimuths must be 0–360°' + + # Convert distances and azimuths to Cartesian coordinates in units of km + x = distances * np.sin(np.deg2rad(azimuths)) / _M_PER_KM + y = distances * np.cos(np.deg2rad(azimuths)) / _M_PER_KM + + # Remove the mean + x -= x.mean() + y -= y.mean() + + # Form rij array + rij = np.vstack((x, y)) + + return rij From a09e80f680e974c4293be251806549ae8a617016 Mon Sep 17 00:00:00 2001 From: Liam Toney Date: Mon, 12 May 2025 10:31:06 -0600 Subject: [PATCH 4/8] Add to `__init__.py` --- array_processing/algorithms/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/array_processing/algorithms/__init__.py b/array_processing/algorithms/__init__.py index a07ae1d..4d8368f 100644 --- a/array_processing/algorithms/__init__.py +++ b/array_processing/algorithms/__init__.py @@ -1,3 +1,3 @@ from .fk_freq import fk_freq from .srcLoc import srcLoc -from .helpers import getrij +from .helpers import getrij, compass2rij From b9bc2991d5952645753ec90c1c2e18a435e03391 Mon Sep 17 00:00:00 2001 From: Liam Toney Date: Thu, 5 Jun 2025 15:53:33 -0600 Subject: [PATCH 5/8] Pin fastkml --- environment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environment.yml 
b/environment.yml index ffe165b..0ebe0c2 100644 --- a/environment.yml +++ b/environment.yml @@ -4,7 +4,7 @@ channels: - defaults dependencies: - python<3.11 # Temporary fix (see https://github.com/numba/numba/issues/8304) - - fastkml + - fastkml<1 - ipython - obspy - numba From a0f3af4957939d42207dd6c093392952bcb11fd1 Mon Sep 17 00:00:00 2001 From: Liam Toney Date: Wed, 11 Jun 2025 09:48:16 -0600 Subject: [PATCH 6/8] Tweak error statement on azimuths --- array_processing/algorithms/helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/array_processing/algorithms/helpers.py b/array_processing/algorithms/helpers.py index 38bb5e4..c808667 100644 --- a/array_processing/algorithms/helpers.py +++ b/array_processing/algorithms/helpers.py @@ -72,7 +72,7 @@ def compass2rij(distances, azimuths): if distances.size != azimuths.size: raise ValueError('There must be the same number of distances and azimuths') assert (distances >= 0).all(), 'Distances cannot be negative' - assert ((azimuths >= 0) & (azimuths < 360)).all(), 'Azimuths must be 0–360°' + assert ((azimuths >= 0) & (azimuths < 360)).all(), 'Azimuths must be 0–359°' # Convert distances and azimuths to Cartesian coordinates in units of km x = distances * np.sin(np.deg2rad(azimuths)) / _M_PER_KM From 6ecd7f641c9e0cf9026af75e3368a33c6700ff33 Mon Sep 17 00:00:00 2001 From: Liam Toney Date: Wed, 16 Jul 2025 09:53:10 -0600 Subject: [PATCH 7/8] Update `readthedocs.yml` To conform to new RTD requirements --- readthedocs.yml | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/readthedocs.yml b/readthedocs.yml index 6b23b13..f4cc9c9 100644 --- a/readthedocs.yml +++ b/readthedocs.yml @@ -1,7 +1,15 @@ version: 2 +build: + os: "ubuntu-22.04" + tools: + python: "3.12" + +sphinx: + configuration: doc/conf.py + python: - install: - - requirements: doc/requirements.txt - - method: setuptools - path: . 
+ install: + - requirements: doc/requirements.txt + - method: pip + path: . From e9dee42201098ba76c8d229662e96650a820e457 Mon Sep 17 00:00:00 2001 From: Liam Toney Date: Thu, 17 Jul 2025 10:43:28 -0600 Subject: [PATCH 8/8] Update `requirements.txt` Add doc building deps --- doc/requirements.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/requirements.txt b/doc/requirements.txt index b0f8447..24e9142 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -1 +1,3 @@ sphinxcontrib-apidoc +recommonmark +sphinx_rtd_theme