"""Reliability analysis (Zuverlaessigkeit) for a least-squares network adjustment:
redundancy numbers, global model test, local test (inner reliability) and
external reliability measures."""

import numpy as np
from scipy import stats
from scipy.stats import norm
import pandas as pd

class Zuverlaessigkeit:

    @staticmethod
    def gesamtredundanz(n, u):
        # Total redundancy: number of observations n minus number of unknowns u.
        r = n - u
        return r

    @staticmethod
    def berechne_R(Q_vv, P):
        # Redundancy matrix R = Q_vv @ P (cofactor matrix of the residuals
        # times the weight matrix); its trace equals the total redundancy.
        R = Q_vv @ P
        return R

    @staticmethod
    def berechne_ri(R):
        # Redundancy numbers r_i (diagonal of R) and EV_i, the same values in percent.
        ri = np.diag(R)
        EVi = 100.0 * ri
        return ri, EVi

    @staticmethod
    def klassifiziere_ri(ri):
        # Classification of a redundancy number (controllability of the observation).
        if ri < 0.01:
            return "not controllable"
        elif ri < 0.10:
            return "poorly controllable"
        elif ri < 0.30:
            return "sufficiently controllable"
        elif ri < 0.70:
            return "well controllable"
        else:
            return "almost completely redundant"

    @staticmethod
    def globaltest(r_gesamt, sigma0_apost, sigma0_apriori, alpha):
        # Global model test: T_G = s0_apost^2 / sigma0_apriori^2, compared against
        # the F distribution with (r, inf) degrees of freedom.
        T_G = (sigma0_apost ** 2) / (sigma0_apriori ** 2)
        # The huge denominator dof (10**9) approximates infinity, i.e. F(r, inf) = chi2(r)/r.
        F_krit = stats.f.ppf(1 - alpha, r_gesamt, 10 ** 9)
        H0 = T_G < F_krit

        if H0:
            interpretation = (
                "Null hypothesis H₀ accepted.\n"
            )
        else:
            interpretation = (
                "Null hypothesis H₀ rejected!\n"
                "Possible causes:\n"
                "→ The data contain gross errors. Run the local test and, if necessary, remove gross errors from the data.\n"
                "→ The stochastic model is too optimistic. Check the weights and adjust them if necessary."
            )

        return {
            "r_gesamt": r_gesamt,
            "sigma0_apost": sigma0_apost,
            "sigma0_apriori": sigma0_apriori,
            "alpha": alpha,
            "T_G": T_G,
            "F_krit": F_krit,
            "H0_angenommen": H0,
            "Interpretation": interpretation,
        }

    @staticmethod
    def lokaltest_innere_Zuverlaessigkeit(v, Q_vv, ri, labels, s0_apost, alpha, beta):
        v = np.asarray(v, float).reshape(-1)
        Q_vv = np.asarray(Q_vv, float)
        ri = np.asarray(ri, float).reshape(-1)
        labels = list(labels)

        # Estimated gross error GF_i = -v_i / r_i (guard against r_i = 0):
        ri_ = np.where(ri == 0, np.nan, ri)
        GF = -v / ri_

        # Standard deviations of the residuals
        qv = np.diag(Q_vv).astype(float)
        s_vi = float(s0_apost) * np.sqrt(qv)

        # Normalized residual NV
        NV = np.abs(v) / s_vi

        # Quantiles k (two-sided, significance level alpha) and kA (test power 1-β)
        k = float(norm.ppf(1 - alpha / 2))
        kA = float(norm.ppf(1 - beta))

        # Non-centrality parameter δ0
        nzp = k + kA

        # Bound for the detectability of a gross error (GRZW)
        GRZW_i = (s_vi / ri_) * nzp

        # Local test: an observation is flagged when its normalized residual
        # exceeds the critical value k (δ0 only enters the detectability bound above).
        auffaellig = NV > k

        Lokaltest_innere_Zuv = pd.DataFrame({
            "Beobachtung": labels,
            "v_i": v,
            "r_i": ri,
            "s_vi": s_vi,
            "k": k,
            "NV_i": NV,
            "auffaellig": auffaellig,
            "GF_i": GF,
            "GRZW_i": GRZW_i,
            "alpha": alpha,
            "beta": beta,
            "kA": kA,
            "δ0": nzp,
        })
        return Lokaltest_innere_Zuv
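
# Usage note (an assumption about the intended workflow, not stated in the module):
# the DataFrame returned by Zuverlaessigkeit.lokaltest_innere_Zuverlaessigkeit is
# passed unchanged as the `Lokaltest` argument of aeussere_zuverlaessigkeit below,
# which reads its "r_i", "GF_i" and "GRZW_i" columns; labels, A, P and x must use
# the same observation and unknown ordering as in the adjustment itself.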

def aeussere_zuverlaessigkeit(
    Lokaltest, labels, Qxx, A, P, s0_apost, unbekannten_liste, x,
    angle_units="rad",
    ep_use_abs=True,
    exclude_prefixes=("lA_",),
):
    # External reliability: effect of (just detectable) gross errors on the
    # adjusted coordinates.
    df = Lokaltest.copy()
    labels = [str(l) for l in list(labels)]

    Qxx = np.asarray(Qxx, float)
    A = np.asarray(A, float)
    P = np.asarray(P, float)
    x = np.asarray(x, float).reshape(-1)

    namen_str = [str(sym) for sym in unbekannten_liste]

    n = A.shape[0]
    if len(labels) != n:
        raise ValueError(f"len(labels)={len(labels)} does not match A.shape[0]={n}.")
    if len(df) != n:
        raise ValueError(f"Lokaltest has {len(df)} rows, A has {n} observations.")

    # Filter out pseudo-observations
    keep = np.ones(n, dtype=bool)
    if exclude_prefixes:
        for i, lbl in enumerate(labels):
            if any(lbl.startswith(pref) for pref in exclude_prefixes):
                keep[i] = False

    # Trim everything consistently (important: A and P as well!)
    df = df.loc[keep].reset_index(drop=True)
    labels = [lbl for (lbl, k) in zip(labels, keep) if k]
    A = A[keep, :]
    P = P[np.ix_(keep, keep)]

    # new n
    n = A.shape[0]

    # Data from the local test
    ri = df["r_i"].astype(float).to_numpy()
    GF = df["GF_i"].astype(float).to_numpy()
    GRZW = df["GRZW_i"].astype(float).to_numpy()

    s0 = float(s0_apost)

    def to_rad(val):
        if angle_units == "rad":
            return val
        if angle_units == "gon":
            return val * (np.pi / 200.0)
        if angle_units == "deg":
            return val * (np.pi / 180.0)
        raise ValueError("angle_units must be 'rad', 'gon' or 'deg'.")

    # Point coordinates from x (distance equivalent for angular EP values)
    coords = {}
    punkt_ids = sorted({name[1:] for name in namen_str
                        if name[:1].upper() in ("X", "Y", "Z") and len(name) > 1})
    for pid in punkt_ids:
        try:
            ix = namen_str.index(f"X{pid}")
            iy = namen_str.index(f"Y{pid}")
            iz = namen_str.index(f"Z{pid}")
            coords[pid] = (x[ix], x[iy], x[iz])
        except ValueError:
            continue

    # Station / target point parsed from the observation label
    standpunkte = [""] * n
    zielpunkte = [""] * n
    for i, lbl in enumerate(labels):
        parts = lbl.split("_")
        sp, zp = None, None

        if any(k in lbl for k in ["_SD_", "_R_", "_ZW_"]):
            if len(parts) >= 5:
                sp, zp = parts[3].strip(), parts[4].strip()
        elif "gnss" in lbl.lower():
            if len(parts) >= 2:
                sp, zp = parts[-2].strip(), parts[-1].strip()
        elif "niv" in lbl.lower():
            if len(parts) >= 4:
                sp = parts[3].strip()
            if len(parts) >= 5:
                zp = parts[4].strip()
        else:
            sp = parts[-1].strip()

        standpunkte[i] = sp or ""
        zielpunkte[i] = zp or ""

    # Computation of EP (external reliability per observation)
    EP_GF = (1.0 - ri) * GF
    EP_grzw = (1.0 - ri) * GRZW
    if ep_use_abs:
        EP_GF = np.abs(EP_GF)
        EP_grzw = np.abs(EP_grzw)

    EP_hat_m = np.full(n, np.nan, float)
    EP_grzw_m = np.full(n, np.nan, float)

    for i, lbl in enumerate(labels):
        sp = standpunkte[i]
        zp = zielpunkte[i]

        is_angle = ("_R_" in lbl) or ("_ZW_" in lbl)
        if not is_angle:
            EP_hat_m[i] = EP_GF[i]
            EP_grzw_m[i] = EP_grzw[i]
            continue

        # Angle -> transverse deviation = angle (rad) * distance (3D)
        if sp in coords and zp in coords:
            X1, Y1, Z1 = coords[sp]
            X2, Y2, Z2 = coords[zp]
            s = np.sqrt((X2 - X1) ** 2 + (Y2 - Y1) ** 2 + (Z2 - Z1) ** 2)

            EP_hat_m[i] = to_rad(EP_GF[i]) * s
            EP_grzw_m[i] = to_rad(EP_grzw[i]) * s

    # 3x3 blocks of the unknowns belonging to one point
    def idx_xyz(pid):
        return [
            namen_str.index(f"X{pid}"),
            namen_str.index(f"Y{pid}"),
            namen_str.index(f"Z{pid}")
        ]

    # EF local + SP local (3D)
    EF = np.full(n, np.nan, float)
    SP_loc_m = np.full(n, np.nan, float)
    EFSP_loc_m = np.full(n, np.nan, float)

    for i in range(n):
        sp = standpunkte[i]
        zp = zielpunkte[i]

        blocks = []
        idx = []

        try:
            if sp:
                b = idx_xyz(sp)
                blocks.append(b)
                idx += b
            if zp:
                b = idx_xyz(zp)
                blocks.append(b)
                idx += b
        except ValueError:
            continue

        if not blocks:
            continue

        idx = list(dict.fromkeys(idx))  # unique, order-preserving

        # Δx from the marginally detectable gross error (GRZW) in observation i
        dl = np.zeros((n, 1))
        dl[i, 0] = GRZW[i]
        dx = Qxx @ (A.T @ (P @ dl))

        dx_loc = dx[idx, :]
        Q_loc = Qxx[np.ix_(idx, idx)]

        # EF local
        EF2 = (dx_loc.T @ np.linalg.solve(Q_loc, dx_loc)).item() / (s0 ** 2)
        EF[i] = np.sqrt(max(0.0, EF2))

        # SP local 3D: maximum trace of the 3x3 point blocks
        tr_list = [np.trace(Qxx[np.ix_(b, b)]) for b in blocks]
        if not tr_list:
            continue

        sigmaPmax_loc = s0 * np.sqrt(max(tr_list))
        SP_loc_m[i] = sigmaPmax_loc
        EFSP_loc_m[i] = EF[i] * sigmaPmax_loc

    ausgabe_zuv = pd.DataFrame({
        "Beobachtung": labels,
        "Stand-Pkt": standpunkte,
        "Ziel-Pkt": zielpunkte,
        "r_i": ri,
        "EP_GF [mm]": EP_hat_m * 1000.0,
        "EP_grzw [mm]": EP_grzw_m * 1000.0,
        "EF": EF,
        "SP_loc_3D [mm]": SP_loc_m * 1000.0,
        "EF*SP_loc_3D [mm]": EFSP_loc_m * 1000.0,
    })
    return ausgabe_zuv
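

# Minimal usage sketch (illustrative values only: the single-unknown setup,
# sigma0_apriori = 0.02 and the alpha/beta choices are assumptions, not part
# of the original module). It exercises the redundancy measures, the global
# test and the local test on a tiny synthetic adjustment.
if __name__ == "__main__":
    A = np.array([[1.0], [1.0], [1.0]])    # design matrix: one unknown, n = 3
    P = np.eye(3)                          # unit weights
    l = np.array([10.02, 9.98, 10.03])     # observations

    # Least-squares solution and residuals
    Qxx = np.linalg.inv(A.T @ P @ A)
    x = Qxx @ (A.T @ P @ l)
    v = A @ x - l

    # Cofactor matrix of the residuals: Q_vv = Q_ll - A Qxx A^T (with Q_ll = P^-1)
    Q_vv = np.linalg.inv(P) - A @ Qxx @ A.T

    n, u = A.shape
    r = Zuverlaessigkeit.gesamtredundanz(n, u)
    R = Zuverlaessigkeit.berechne_R(Q_vv, P)
    ri, EVi = Zuverlaessigkeit.berechne_ri(R)
    print("r =", r, "| r_i =", np.round(ri, 3), "| sum(r_i) =", round(float(ri.sum()), 3))

    # A-posteriori standard deviation of unit weight
    s0_apost = float(np.sqrt((v @ P @ v) / r))

    gt = Zuverlaessigkeit.globaltest(r, s0_apost, sigma0_apriori=0.02, alpha=0.05)
    print(gt["Interpretation"])

    tab = Zuverlaessigkeit.lokaltest_innere_Zuverlaessigkeit(
        v, Q_vv, ri, ["l1", "l2", "l3"], s0_apost, alpha=0.001, beta=0.20
    )
    print(tab[["Beobachtung", "v_i", "r_i", "NV_i", "auffaellig", "GRZW_i"]])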