import numpy as np
import math
import sys

# --- Constants ---
N_MAX_SIZE = 300
# math.pi gives full double precision, unlike a short hardcoded literal.
PI_CONST = math.pi

# --- Global work arrays (1-based indexing, hence N_MAX_SIZE + 1 slots) ---
# These mirror the original C++ globals; in a larger Python program the
# related data would more idiomatically live in a class.
x = np.zeros(N_MAX_SIZE + 1, dtype=np.float64)
f = np.zeros(N_MAX_SIZE + 1, dtype=np.float64)
fpe = np.zeros(N_MAX_SIZE + 1, dtype=np.float64)
spec = np.zeros(N_MAX_SIZE + 1, dtype=np.float64)
g = np.zeros(N_MAX_SIZE + 1, dtype=np.float64)
c = np.zeros(N_MAX_SIZE + 1, dtype=np.float64)


# --- Function Definitions ---

# ## Function: levins
# One order-update step of the Levinson-Durbin recursion used by Burg's
# method: computes the new reflection coefficient and refreshes the
# prediction errors in place.
def levins(nmax, g_arr, pm_val_ref, b1_arr, b2_arr, a_arr, m):
    """Advance the Burg/Levinson recursion from order m-1 to order m.

    Computes the order-m reflection coefficient from the forward
    (b1_arr) and backward (b2_arr) prediction errors, folds it into the
    AR coefficients g_arr (a_arr holds the previous order's
    coefficients), and rescales the prediction-error power
    pm_val_ref[0] in place.  All arrays use 1-based indexing.
    """
    num = 0.0  # numerator:   cross products of forward/backward errors
    den = 0.0  # denominator: total error energy
    for idx in range(1, nmax - m + 1):
        num += b1_arr[idx] * b2_arr[idx]
        den += b1_arr[idx] * b1_arr[idx] + b2_arr[idx] * b2_arr[idx]

    # Reflection coefficient; guard the degenerate all-zero-error case.
    g_arr[m] = 0.0 if den == 0.0 else -2.0 * num / den

    # Prediction-error power shrinks by (1 - k_m^2); updated in place.
    pm_val_ref[0] *= 1.0 - g_arr[m] * g_arr[m]

    # Levinson step: combine the new reflection coefficient with the
    # order m-1 coefficients saved in a_arr.
    if m != 1:
        for k in range(1, m):
            g_arr[k] = a_arr[k] + g_arr[m] * a_arr[m - k]

    # Remember the current coefficients for the next order update.
    a_arr[1:m + 1] = g_arr[1:m + 1]

    # Refresh the forward/backward errors for the next order.  The
    # tuple assignment evaluates both right-hand sides before storing,
    # reproducing the original "save old b1[i] first" semantics.
    for idx in range(1, nmax - m):
        b1_arr[idx], b2_arr[idx] = (
            b1_arr[idx] + a_arr[m] * b2_arr[idx],
            b2_arr[idx + 1] + a_arr[m] * b1_arr[idx],
        )


# ## Function: burg
# Estimates AR coefficients and autocorrelation function using Burg's method.
def burg(nmax, mmx, lmax, x_arr, g_arr, c_arr, fpe_arr, pm_val_ref):
    """Fit an AR(mmx) model to x_arr[1..nmax] with Burg's method.

    Fills, all 1-based:
      g_arr          -- AR coefficients,
      c_arr          -- autocorrelation estimates, extrapolated to lmax,
      fpe_arr        -- Akaike's Final Prediction Error per order,
      pm_val_ref[0]  -- prediction-error power (in/out "reference").
    Prints a progress marker for each model order m.
    """
    # Scratch arrays (heap-allocated in the C++ original); prev_g keeps
    # the order m-1 coefficients between calls to levins.
    fwd_err = np.zeros(nmax + 1, dtype=np.float64)
    bwd_err = np.zeros(nmax + 1, dtype=np.float64)
    prev_g = np.zeros(mmx + 1, dtype=np.float64)

    # Lag-0 autocorrelation (signal variance) seeds the recursion.
    energy = sum(x_arr[i] * x_arr[i] for i in range(1, nmax + 1))
    c_arr[1] = energy / float(nmax)
    pm_val_ref[0] = c_arr[1]

    # Initial FPE; nmax == 1 would divide by zero.
    if nmax - 1 == 0:
        fpe_arr[1] = 0.0
    else:
        fpe_arr[1] = float(nmax + 1) / float(nmax - 1) * pm_val_ref[0]

    lag = mmx + 1
    lg1 = lag + 1

    # Seed the error sequences: forward error is x[i], backward error is
    # the following sample x[i+1].
    fwd_err[1] = x_arr[1]
    for i in range(2, nmax + 1):
        fwd_err[i] = x_arr[i]
        bwd_err[i - 1] = x_arr[i]

    for m in range(1, mmx + 1):
        print(f"\n M={m:4d} ", end='')
        # Updates g_arr, pm_val_ref, fwd_err, bwd_err and prev_g in place.
        levins(nmax, g_arr, pm_val_ref, fwd_err, bwd_err, prev_g, m)

        # Next autocorrelation point from the Yule-Walker relation.
        c_arr[m + 1] = -sum(c_arr[m - i + 1] * g_arr[i] for i in range(1, m + 1))

        if m != (nmax - 1):
            # FPE for order m; guard nmax - m - 1 == 0.
            if nmax - m - 1 == 0:
                fpe_arr[m + 1] = 0.0
            else:
                fpe_arr[m + 1] = float(nmax + m + 1) / float(nmax - m - 1) * pm_val_ref[0]

    # Extrapolate the autocorrelation beyond the directly-estimated lags
    # using the fitted AR coefficients.
    if lag < lmax:
        for ell in range(lg1, lmax + 1):
            acc = 0.0
            for i in range(1, mmx + 1):
                # Keep the c_arr index inside its valid range.
                if 0 <= ell - i <= lmax:
                    acc -= c_arr[ell - i] * g_arr[i]
            c_arr[ell] = acc


# ## Function: mem_1
# Calculates the Maximum Entropy Method power spectrum.
def mem_1(nmax, mmx, dt_param, g_arr, f_arr, spec_arr, pm_val):
    """Evaluate the Maximum Entropy Method power spectrum.

    Fills f_arr[1..nmax] with frequency points and spec_arr[1..nmax]
    with the MEM density 2*pm*dt / |1 + sum_j g_j e^{i*w*j*dt}|^2.
    Prints each frequency as it is produced.
    """
    # Frequency resolution; nmax == 1 would divide by zero.
    f0 = 0.0 if nmax - 1 == 0 else 1.0 / (float(nmax - 1) * dt_param)

    for i in range(1, nmax + 1):
        f_arr[i] = f0 * float(i - 1)
        print(f"\n freq={f_arr[i]:6.2f}", end='')

        omega_dt = 2.0 * PI_CONST * f_arr[i] * dt_param
        re_part = 1.0  # real part of 1 + sum g_j e^{i w j dt}
        im_part = 0.0  # imaginary part
        for j in range(1, mmx + 1):
            re_part += g_arr[j] * math.cos(float(j) * omega_dt)
            im_part += g_arr[j] * math.sin(float(j) * omega_dt)

        power = re_part * re_part + im_part * im_part
        if power == 0.0:
            # Stand-in for infinity when the AR polynomial has a root here.
            spec_arr[i] = 1e18
        else:
            spec_arr[i] = 2.0 * pm_val * dt_param / power


# ## Function: mk_dat
# Generates synthetic data for MEM analysis.
def mk_dat(x_arr, nmax_ref, mmx_ref, lmax_ref, dt_param_ref):
    """Generate the synthetic four-sine test signal.

    Writes the run parameters into the single-element "reference"
    arrays: nmax (sample count), mmx (AR model order), lmax (max lag)
    and the sampling interval dt.  x_arr[1..nmax] receives the samples.
    """
    nmax_ref[0] = 241
    mmx_ref[0] = 38
    lmax_ref[0] = 50
    # dt spans [0, 1] over the nmax samples; guard nmax == 1.
    if nmax_ref[0] - 1 == 0:
        dt_param_ref[0] = 0.0
    else:
        dt_param_ref[0] = 1.0 / float(nmax_ref[0] - 1)

    # Periods of the four superimposed sine components.
    periods = (1.0 / 124.5, 1.0 / 65.0, 1.0 / 64.0, 1.0 / 4.0)

    for i in range(1, nmax_ref[0] + 1):
        t = dt_param_ref[0] * float(i - 1)
        # Sum of the four sines; the C++ original optionally added a
        # drand48 noise term here, which stays disabled.
        x_arr[i] = sum(math.sin(2.0 * PI_CONST * t / p) for p in periods)


# ## Main Function
# Program entry point: generate data, fit the AR model, print the spectrum.
def main():
    """Drive the full MEM analysis and print the result table."""
    # Single-element arrays emulate C++ pass-by-reference out-params.
    nmax_ref = np.array([0], dtype=int)
    mmx_ref = np.array([0], dtype=int)
    lmax_ref = np.array([0], dtype=int)
    dt_val_ref = np.array([0.0], dtype=np.float64)
    pm_ref = np.array([0.0], dtype=np.float64)

    # Data generation and parameter setup.
    mk_dat(x, nmax_ref, mmx_ref, lmax_ref, dt_val_ref)
    nmax = nmax_ref[0]
    mmx = mmx_ref[0]
    lmax = lmax_ref[0]
    dt_val = dt_val_ref[0]

    # Widen lmax so the printed table always reaches lag + 1.
    lag = mmx + 1
    if lag + 1 > lmax:
        lmax = lag + 1

    # Center the data: subtract the sample mean.
    av = sum(x[i] for i in range(1, nmax + 1)) / float(nmax)
    for i in range(1, nmax + 1):
        x[i] -= av

    # AR model estimation, then the MEM spectrum from its coefficients.
    burg(nmax, mmx, lmax, x, g, c, fpe, pm_ref)
    pm = pm_ref[0]
    mem_1(nmax, mmx, dt_val, g, f, spec, pm)

    # --- Result table ---
    lag = mmx + 1
    lg1 = lag + 1
    lx1 = lmax + 1
    print("\n frec spc fpe auto c.")

    # Rows with spectrum, FPE and autocorrelation (orders 1..lag).
    for i in range(1, lag + 1):
        print(f"\n{f[i]:6.1f} {spec[i]:7.3f} {fpe[i]:7.3f} {c[i]:7.3f}", end='')

    # Extrapolated autocorrelation rows (no FPE available here).
    if lag < lmax:
        for i in range(lg1, lmax + 1):
            print(f"\n{f[i]:6.1f} {spec[i]:7.3f} {c[i]:6.3f}", end='')

    # Remaining spectrum-only rows.
    for i in range(lx1, nmax + 1):
        print(f"\n{f[i]:6.1f} {spec[i]:7.3f}", end='')

    print("\n")  # final newline for clean output
    sys.exit(0)  # explicit success, as in the C++ original


# --- Program Entry Point ---
if __name__ == "__main__":
    main()