From d80492a9e57c603dcfe736dfa01a08a44955cfc6 Mon Sep 17 00:00:00 2001
From: "Tammo.Weber"
Date: Mon, 2 Feb 2026 18:38:17 +0100
Subject: [PATCH] Now works with all Panou cases

---
 GHA_triaxial/panou_2013_2GHA_num.py | 108 +++++++++++++++++++++-------
 1 file changed, 81 insertions(+), 27 deletions(-)

diff --git a/GHA_triaxial/panou_2013_2GHA_num.py b/GHA_triaxial/panou_2013_2GHA_num.py
index deb9212..4d4d229 100644
--- a/GHA_triaxial/panou_2013_2GHA_num.py
+++ b/GHA_triaxial/panou_2013_2GHA_num.py
@@ -30,6 +30,22 @@ def gha2_num(ell: EllipsoidTriaxial, beta_1: float, lamb_1: float, beta_2: float
     def arccot(x):
         return np.arctan2(1.0, x)
 
+    def cot(a):
+        return np.cos(a) / np.sin(a)
+
+    def wrap_to_pi(x):
+        return (x + np.pi) % (2 * np.pi) - np.pi
+
+    def sph_azimuth(beta1, lam1, beta2, lam2):
+        # spherical initial azimuth (from north / the meridian, in radians)
+        dlam = wrap_to_pi(lam2 - lam1)
+        y = np.sin(dlam) * np.cos(beta2)
+        x = np.cos(beta1) * np.sin(beta2) - np.sin(beta1) * np.cos(beta2) * np.cos(dlam)
+        a = np.arctan2(y, x)  # (-pi, pi]
+        if a < 0:
+            a += 2 * np.pi
+        return a
+
     def BETA_LAMBDA(beta, lamb):
         BETA = (ell.ay**2 * np.sin(beta)**2 + ell.b**2 * np.cos(beta)**2) / (ell.Ex**2 - ell.Ey**2 * np.sin(beta)**2)
@@ -158,11 +174,13 @@ def gha2_num(ell: EllipsoidTriaxial, beta_1: float, lamb_1: float, beta_2: float
     N = n
 
     dlamb = lamb_2 - lamb_1
+    alpha0_sph = sph_azimuth(beta_1, lamb_1, beta_2, lamb_2)
 
     if abs(dlamb) < 1e-15:
         beta_0 = 0.0
     else:
-        beta_0 = (beta_2 - beta_1) / (lamb_2 - lamb_1)
+        (_, _, E1, G1, *_) = BETA_LAMBDA(beta_1, lamb_1)
+        beta_0 = np.sqrt(G1 / E1) * cot(alpha0_sph)
 
     converged = False
     iterations = 0
@@ -170,40 +188,76 @@ def gha2_num(ell: EllipsoidTriaxial, beta_1: float, lamb_1: float, beta_2: float
     # funcs = functions()
     ode_lamb = buildODElamb()
 
-    for i in range(iter_max):
-        iterations = i + 1
+    def solve_newton(beta_p0_init: float):
+        beta_p0 = float(beta_p0_init)
 
-        # startwerte = [lamb_1, beta_1, beta_0, 0.0, 1.0]
-        startwerte = np.array([beta_1, beta_0, 0.0, 1.0])
+        for _ in range(iter_max):
+            startwerte = np.array([beta_1, beta_p0, 0.0, 1.0], dtype=float)
+            lamb_list, states = rk.rk4(ode_lamb, lamb_1, startwerte, dlamb, N, False)
 
-        # werte = rk.verfahren(funcs, startwerte, dlamb, N)
-        lamb_list, werte = rk.rk4(ode_lamb, lamb_1, startwerte, dlamb, N, False)
-        # lamb_end, beta_end, beta_p_end, X3_end, X4_end = werte[-1]
-        lamb_end = lamb_list[-1]
-        beta_end, beta_p_end, X3_end, X4_end = werte[-1]
+            beta_end, beta_p_end, X3_end, X4_end = states[-1]
+            delta = beta_end - beta_2
 
-        d_beta_end_d_beta0 = X3_end
-        delta = beta_end - beta_2
+            if abs(delta) < epsilon:
+                return True, beta_p0, lamb_list, states
 
-        if abs(delta) < epsilon:
-            converged = True
-            break
+            d_beta_end_d_beta0 = X3_end
+            if abs(d_beta_end_d_beta0) < 1e-20:
+                return False, None, None, None
 
-        if abs(d_beta_end_d_beta0) < 1e-20:
-            raise RuntimeError("Abbruch.")
+            step = delta / d_beta_end_d_beta0
+            max_step = 0.5
+            if abs(step) > max_step:
+                step = np.sign(step) * max_step
 
-        max_step = 0.5
-        step = delta / d_beta_end_d_beta0
-        if abs(step) > max_step:
-            step = np.sign(step) * max_step
-        beta_0 = beta_0 - step
+            beta_p0 = beta_p0 - step
 
-    if not converged:
-        raise RuntimeError("konvergiert nicht.")
+        return False, None, None, None
 
-    # Z
-    # werte = rk.verfahren(funcs, [lamb_1, beta_1, beta_0, 0.0, 1.0], dlamb, N, False)
-    lamb_list, werte = rk.rk4(ode_lamb, lamb_1, np.array([beta_1, beta_0, 0.0, 1.0]), dlamb, N, False)
+    alpha0_sph = sph_azimuth(beta_1, lamb_1, beta_2, lamb_2)
+    (_, _, E1, G1, *_) = BETA_LAMBDA(beta_1, lamb_1)
+    beta_p0_sph = np.sqrt(G1 / E1) * cot(alpha0_sph)
+
+    guesses = [
+        beta_p0_sph,
+        0.5 * beta_p0_sph,
+        2.0 * beta_p0_sph,
+        -beta_p0_sph,
+        -0.5 * beta_p0_sph,
+    ]
+
+    best = None
+
+    for g in guesses:
+        ok, beta_p0_sol, lamb_list_cand, states_cand = solve_newton(g)
+        if not ok:
+            continue
+
+        beta_arr_c = np.array([st[0] for st in states_cand], dtype=float)
+        beta_p_arr_c = np.array([st[1] for st in states_cand], dtype=float)
+        lamb_arr_c = np.array(lamb_list_cand, dtype=float)
+
+        integrand = np.zeros(N + 1)
+        for i in range(N + 1):
+            (_, _, Ei, Gi, *_) = BETA_LAMBDA(beta_arr_c[i], lamb_arr_c[i])
+            integrand[i] = np.sqrt(Ei * beta_p_arr_c[i] ** 2 + Gi)
+
+        h = abs(dlamb) / N
+        if N % 2 == 0:
+            S = integrand[0] + integrand[-1] \
+                + 4.0 * np.sum(integrand[1:-1:2]) \
+                + 2.0 * np.sum(integrand[2:-1:2])
+            s_cand = h / 3.0 * S
+        else:
+            s_cand = np.trapz(integrand, dx=h)
+
+        if (best is None) or (s_cand < best[0]):
+            best = (s_cand, beta_p0_sol, lamb_list_cand, states_cand)
+
+    if best is None:
+        raise RuntimeError("No multi-start variant converged.")
+
+    s_best, beta_0, lamb_list, werte = best
 
     beta_arr = np.zeros(N + 1)
     # lamb_arr = np.zeros(N + 1)
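
Note on the candidate selection above (illustration only, not part of the patch): for each multi-start guess that converges, the arc length s = integral of sqrt(E * beta'**2 + G) dlambda is evaluated with a composite Simpson rule when the number of intervals N is even, with a trapezoidal fallback otherwise, and the shortest candidate is kept. The sketch below reproduces that even/odd quadrature pattern on an integrand with a known value; the helper name composite_quadrature is made up for this example, only NumPy is assumed, and the trapezoid branch is written out by hand so it does not depend on np.trapz.

import numpy as np


def composite_quadrature(samples, h):
    """Integrate equally spaced samples f(x_0), ..., f(x_N) with spacing h:
    composite Simpson rule if the number of intervals N is even, otherwise
    the composite trapezoidal rule (same even/odd pattern as in the patch).
    Hypothetical helper, for illustration only."""
    samples = np.asarray(samples, dtype=float)
    n = len(samples) - 1
    if n % 2 == 0:
        s = (samples[0] + samples[-1]
             + 4.0 * np.sum(samples[1:-1:2])
             + 2.0 * np.sum(samples[2:-1:2]))
        return h / 3.0 * s
    # odd number of intervals: plain composite trapezoidal rule
    return h * (0.5 * samples[0] + np.sum(samples[1:-1]) + 0.5 * samples[-1])


if __name__ == "__main__":
    # Arc length of y = sin(x) on [0, pi]: integrand sqrt(1 + cos(x)**2),
    # exact value is about 3.8202.
    a, b = 0.0, np.pi
    for n in (10, 11, 100):  # even -> Simpson, odd -> trapezoid
        x = np.linspace(a, b, n + 1)
        f = np.sqrt(1.0 + np.cos(x) ** 2)
        print(n, composite_quadrature(f, (b - a) / n))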