from dataclasses import dataclass

import numpy as np
import pandas as pd
from scipy import stats
from scipy.stats import norm


@dataclass
class Zuverlaessigkeit:
    """Reliability measures for a least-squares network adjustment."""

    @staticmethod
    def gesamtredundanz(n, u):
        # Total redundancy: number of observations minus number of unknowns.
        r = n - u
        return r

    @staticmethod
    def berechne_R(Q_vv, P):
        # Redundancy matrix R = Q_vv @ P.
        R = Q_vv @ P
        return R

    @staticmethod
    def berechne_ri(R):
        # Redundancy numbers r_i (diagonal of R) and the same values in percent.
        ri = np.diag(R)
        EVi = 100.0 * ri
        return ri, EVi

    @staticmethod
    def klassifiziere_ri(ri):
        # Classification of a single redundancy number.
        if ri < 0.01:
            return "not controllable"
        elif ri < 0.10:
            return "poorly controllable"
        elif ri < 0.30:
            return "sufficiently controllable"
        elif ri < 0.70:
            return "well controllable"
        else:
            return "almost fully redundant"

    @staticmethod
    def globaltest(r_gesamt, sigma0_apost, sigma0_apriori, alpha):
        # Global model test: T_G = s0_apost² / s0_apriori², compared against the
        # F quantile with (r, ∞) degrees of freedom (∞ approximated by 10⁹).
        T_G = (sigma0_apost ** 2) / (sigma0_apriori ** 2)
        F_krit = stats.f.ppf(1 - alpha, r_gesamt, 10 ** 9)
        H0 = T_G < F_krit
        if H0:
            interpretation = "Null hypothesis H₀ accepted.\n"
        else:
            interpretation = (
                "Null hypothesis H₀ rejected!\n"
                "Possible reasons:\n"
                "→ There are gross errors in the data. Run the local test and, "
                "if necessary, remove the gross errors from the data.\n"
                "→ The stochastic model is too optimistic. Check the weights "
                "and adjust them if necessary."
            )
        return {
            "r_gesamt": r_gesamt,
            "sigma0_apost": sigma0_apost,
            "sigma0_apriori": sigma0_apriori,
            "alpha": alpha,
            "T_G": T_G,
            "F_krit": F_krit,
            "H0_angenommen": H0,
            "Interpretation": interpretation,
        }


def lokaltest_innere_Zuverlaessigkeit(v, Q_vv, ri, labels, s0_apost, alpha, beta):
    v = np.asarray(v, float).reshape(-1)
    Q_vv = np.asarray(Q_vv, float)
    ri = np.asarray(ri, float).reshape(-1)
    labels = list(labels)

    # Estimated gross error per observation (undefined where r_i = 0):
    ri_ = np.where(ri == 0, np.nan, ri)
    GF = -v / ri_

    # Standard deviations of the residuals
    qv = np.diag(Q_vv).astype(float)
    s_vi = float(s0_apost) * np.sqrt(qv)

    # Normalised residuals NV
    NV = np.abs(v) / s_vi

    # Quantiles k and kA (two-sided test, test power 1 − β)
    k = float(norm.ppf(1 - alpha / 2))
    kA = float(norm.ppf(1 - beta))

    # Non-centrality parameter δ0
    nzp = k + kA

    # Minimal detectable gross error (GRZW)
    GRZW_i = (s_vi / ri_) * nzp

    auffaellig = NV > nzp

    Lokaltest_innere_Zuv = pd.DataFrame({
        "Beobachtung": labels,
        "v_i": v,
        "r_i": ri,
        "s_vi": s_vi,
        "k": k,
        "NV_i": NV,
        "auffaellig": auffaellig,
        "GF_i": GF,
        "GRZW_i": GRZW_i,
        "alpha": alpha,
        "beta": beta,
        "kA": kA,
        "δ0": nzp,
    })
    return Lokaltest_innere_Zuv
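
# Worked example for the test quantities above (quantiles computed from the
# standard normal distribution): with alpha = 0.001 and beta = 0.20
# (test power 80 %),
#   k  = norm.ppf(0.9995) ≈ 3.29
#   kA = norm.ppf(0.80)   ≈ 0.84
#   δ0 = k + kA           ≈ 4.13,
# the classic Baarda setting: an observation is flagged when NV_i = |v_i|/s_vi
# exceeds δ0, and a gross error must reach GRZW_i ≈ 4.13 · s_vi / r_i before
# the test can detect it with 80 % probability.
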
def aeussere_zuverlaessigkeit_EF_EP1(Lokaltest, labels, Qxx, A, P, s0_apost,
                                     unbekannten_liste, x):
    df = Lokaltest.copy()
    labels = list(labels)
    Qxx = np.asarray(Qxx, float)
    A = np.asarray(A, float)
    P = np.asarray(P, float)
    x = np.asarray(x, float).reshape(-1)

    ri = df["r_i"].astype(float).to_numpy()
    GF = df["GF_i"].astype(float).to_numpy()
    s_vi = df["s_vi"].astype(float).to_numpy()
    GRZW = df["GRZW_i"].astype(float).to_numpy()
    nzp = df["δ0"].astype(float).to_numpy()

    n = A.shape[0]  # number of observations
    u = A.shape[1]  # number of unknowns

    # Influence factor EF
    EF = np.zeros(n, dtype=float)
    for i in range(n):
        # 1) set up ∇l_i
        nabla_l = np.zeros((n, 1))
        nabla_l[i, 0] = GRZW[i]
        # 2) ∇x_i = Qxx · Aᵀ · P · ∇l_i
        nabla_x = Qxx @ (A.T @ (P @ nabla_l))
        # 3) EF_i² = (∇x_iᵀ · Qxx⁻¹ · ∇x_i) / s0²
        Qxx_inv_nabla_x = np.linalg.solve(Qxx, nabla_x)  # = Qxx⁻¹ ∇x_i
        EF2 = ((nabla_x.T @ Qxx_inv_nabla_x) / (float(s0_apost) ** 2)).item()
        EF[i] = np.sqrt(EF2)

    # Coordinate dict from x; assumes X/Y/Z of a point are stored consecutively
    coords = {}
    j = 0
    while j < len(unbekannten_liste):
        name = str(unbekannten_liste[j])
        if name.startswith("X"):
            pn = name[1:]
            coords[pn] = (x[j], x[j + 1], x[j + 2])
            j += 3
        else:
            j += 1

    # EP + station points
    EP_m = np.full(len(labels), np.nan, dtype=float)
    standpunkte = [""] * len(labels)

    for i, lbl in enumerate(labels):
        parts = lbl.split("_")
        sp = None
        zp = None

        # Total station: ID_SD_GRP_SP_ZP / ID_R_GRP_SP_ZP / ID_ZW_GRP_SP_ZP
        if ("_SD_" in lbl) or ("_R_" in lbl) or ("_ZW_" in lbl):
            if len(parts) >= 5:
                sp = parts[3].strip()
                zp = parts[4].strip()

        # GNSS: *_gnssbx_SP_ZP etc.
        if ("gnss" in lbl) and (len(parts) >= 4):
            sp = parts[-2].strip()
            zp = parts[-1].strip()

        standpunkte[i] = sp if sp is not None else ""

        one_minus_r = 1.0 - ri[i]

        # SD + GNSS: directly in metres
        if ("_SD_" in lbl) or ("gnss" in lbl):
            EP_m[i] = one_minus_r * GF[i]
        # R / ZW: angle -> distance equivalent via the slope distance s
        elif ("_R_" in lbl) or ("_ZW_" in lbl):
            if sp and zp and (sp in coords) and (zp in coords):
                X1, Y1, Z1 = coords[sp]
                X2, Y2, Z2 = coords[zp]
                s = float(np.sqrt((X2 - X1) ** 2 + (Y2 - Y1) ** 2 + (Z2 - Z1) ** 2))
                EP_m[i] = one_minus_r * (GF[i] * s)

    # SP at the station point (2D)
    diagQ = np.diag(Qxx)
    SP_cache_mm = {}
    for sp in {s for s in standpunkte if s}:
        # Assumes X/Y are stored consecutively; raises IndexError if the
        # station has no coordinate unknowns (the *_stabil variant guards this).
        idx_x = [k for k, sym in enumerate(unbekannten_liste) if str(sym) == f"X{sp}"][0]
        qx = diagQ[idx_x]
        qy = diagQ[idx_x + 1]
        SP_cache_mm[sp] = float(s0_apost) * np.sqrt(qx + qy) * 1000.0

    SP_mm = np.array([SP_cache_mm.get(sp, np.nan) for sp in standpunkte], dtype=float)

    out = pd.DataFrame({
        "Beobachtung": labels,
        "Stand-Pkt": standpunkte,
        "EF": EF,
        "EP [mm]": EP_m * 1000.0,
        "SP [mm]": SP_mm,
        "EF*SP [mm]": EF * SP_mm,
    })
    return out
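
# Label convention assumed by the parsers above and below (the concrete values
# are purely illustrative; the scheme ID_TYPE_GRP_SP_ZP comes from the data
# import):
#   "07_SD_2_100_101"   -> slope distance, station "100", target "101"
#   "07_R_2_100_101"    -> direction,      station "100", target "101"
#   "01_gnssbx_100_101" -> GNSS baseline,  station "100", target "101"
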
def aeussere_zuverlaessigkeit_EF_EP_stabil(Lokaltest, labels, Qxx, A, P, s0_apost,
                                           unbekannten_liste, x):
    df = Lokaltest.copy()
    labels = list(labels)
    Qxx = np.asarray(Qxx, float)
    A = np.asarray(A, float)
    P = np.asarray(P, float)
    x = np.asarray(x, float).reshape(-1)

    ri = df["r_i"].astype(float).to_numpy()
    GF = df["GF_i"].astype(float).to_numpy()
    GRZW = df["GRZW_i"].astype(float).to_numpy()

    n = A.shape[0]

    # Unknown names as strings for the index lookups
    namen_str = [str(sym) for sym in unbekannten_liste]

    # 1) Influence factor EF
    EF = np.zeros(n, dtype=float)
    for i in range(n):
        nabla_l = np.zeros((n, 1))
        nabla_l[i, 0] = GRZW[i]
        nabla_x = Qxx @ (A.T @ (P @ nabla_l))
        Qxx_inv_nabla_x = np.linalg.solve(Qxx, nabla_x)
        EF2 = ((nabla_x.T @ Qxx_inv_nabla_x) / (float(s0_apost) ** 2)).item()
        EF[i] = np.sqrt(max(0.0, EF2))  # guard against tiny negative round-off

    # 2) Coordinate dict; points found by name, a missing Z component
    #    (pure 2D point) defaults to 0.0
    coords = {}
    punkt_ids = [name[1:] for name in namen_str if name.upper().startswith("X")]
    for pid in punkt_ids:
        try:
            ix = namen_str.index(f"X{pid}")
            iy = namen_str.index(f"Y{pid}")
        except ValueError:
            continue
        try:
            z = x[namen_str.index(f"Z{pid}")]
        except ValueError:
            z = 0.0
        coords[pid] = (x[ix], x[iy], z)

    # 3) EP + station points
    EP_m = np.full(len(labels), np.nan, dtype=float)
    standpunkte = [""] * len(labels)

    for i, lbl in enumerate(labels):
        parts = lbl.split("_")
        sp, zp = None, None
        if any(key in lbl for key in ["_SD_", "_R_", "_ZW_"]):
            if len(parts) >= 5:
                sp, zp = parts[3].strip(), parts[4].strip()
        elif "gnss" in lbl.lower():
            sp, zp = parts[-2].strip(), parts[-1].strip()
        elif "niv" in lbl.lower():
            if len(parts) >= 4:
                sp = parts[3].strip()
            else:
                sp = parts[-1].strip()

        standpunkte[i] = sp if sp is not None else ""
        one_minus_r = 1.0 - ri[i]

        # SD, GNSS, levelling: directly a distance error
        if "_SD_" in lbl or "gnss" in lbl.lower() or "niv" in lbl.lower():
            EP_m[i] = one_minus_r * GF[i]
        # Angles: distance equivalent via the horizontal distance s
        elif "_R_" in lbl or "_ZW_" in lbl:
            if sp in coords and zp in coords:
                X1, Y1, _ = coords[sp]
                X2, Y2, _ = coords[zp]
                s = np.sqrt((X2 - X1) ** 2 + (Y2 - Y1) ** 2)
                EP_m[i] = one_minus_r * (GF[i] * s)

    # 4) SP at the station point (2D position or 1D height)
    diagQ = np.diag(Qxx)
    SP_cache_mm = {}
    for sp in {s for s in standpunkte if s}:
        try:
            ix = namen_str.index(f"X{sp}")
            iy = namen_str.index(f"Y{sp}")
            SP_cache_mm[sp] = float(s0_apost) * np.sqrt(diagQ[ix] + diagQ[iy]) * 1000.0
        except ValueError:
            # No horizontal position: fall back to the height (levelling)
            try:
                iz = namen_str.index(f"Z{sp}")
                SP_cache_mm[sp] = float(s0_apost) * np.sqrt(diagQ[iz]) * 1000.0
            except ValueError:
                SP_cache_mm[sp] = 0.0

    SP_mm = np.array([SP_cache_mm.get(sp, np.nan) for sp in standpunkte], dtype=float)

    return pd.DataFrame({
        "Beobachtung": labels,
        "Stand-Pkt": standpunkte,
        "EF": EF,
        "EP [mm]": EP_m * 1000.0,
        "SP [mm]": SP_mm,
        "EF*SP [mm]": EF * SP_mm,
    })
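

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the module API): a toy adjustment with
# four observations and two unknowns. All numbers are assumed demo values;
# the labels "obs_i" are purely illustrative and do not follow the
# tachymeter/GNSS naming scheme, so only the global and local tests are run.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    A = np.array([[1.0, 0.0],
                  [0.0, 1.0],
                  [1.0, 1.0],
                  [1.0, -1.0]])               # design matrix of the toy network
    P = np.eye(4)                             # equal weights
    l = np.array([1.02, 2.01, 3.00, -0.98])   # observations

    # Least-squares solution and cofactor matrices
    Qxx = np.linalg.inv(A.T @ P @ A)
    x_hat = Qxx @ (A.T @ P @ l)
    v = A @ x_hat - l                         # residuals
    Q_vv = np.linalg.inv(P) - A @ Qxx @ A.T

    n, u = A.shape
    r = Zuverlaessigkeit.gesamtredundanz(n, u)
    ri, EVi = Zuverlaessigkeit.berechne_ri(Zuverlaessigkeit.berechne_R(Q_vv, P))
    s0_apost = float(np.sqrt((v @ P @ v) / r))

    print(Zuverlaessigkeit.globaltest(r, s0_apost, 1.0, alpha=0.05)["Interpretation"])
    labels = [f"obs_{i}" for i in range(n)]
    print(lokaltest_innere_Zuverlaessigkeit(v, Q_vv, ri, labels, s0_apost,
                                            alpha=0.001, beta=0.20))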