Visible Learning: umfassende Erweiterungen
- Berechnung Kopplungspotenzial je Bedürfnis (Young/Roediger)
- Export von coupling_potential_per_need.csv inkl. bridge_energy
- Aggregation und Export von Kopplungsindizes (per Item, per Need)
- Konsolidierung des Mappings Young ↔ Hattie (werte_mapping.csv)
- Neue Visualisierungen:
  • Kopplungspotenzial (2D, farbkodiert nach Balance)
  • Item-Projektion mit Communities
  • 3D-Netzwerkdarstellung (Systemebenen × Thermometer)
  • 3D-Triangulation: Effekt × Bedürfnis × Semantik
  • Thermo-Dashboard (Energie, Entropie, Modularität)
- Verbesserte Skalierungen (Effektstärken normiert, Markergrößen, Kantenstärken)
- Konsistente Export-Pfade (export/*.csv, *.html, *.png)
- Fehlerbehebungen:
  • os-Import nachgezogen
  • robustere Merge-Strategien beim CSV-Export
  • Schutz vor leeren/inkonsistenten Spalten
- CI-Styling & Plotly-Template in alle neuen Plots integriert
This commit is contained in:
115
mapping young hattie.py
Normal file
115
mapping young hattie.py
Normal file
@ -0,0 +1,115 @@
|
||||
import os
|
||||
import pandas as pd
|
||||
# Pfad-Setup: expliziter Nutzerpfad + robuste Fallbacks
|
||||
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
PROJECT_DIR = SCRIPT_DIR # hier liegt die Datei selbst im Projektordner
|
||||
|
||||
# 1) Nutzerpfad (wie gewünscht)
|
||||
USER_SPEC_PATHS = [
|
||||
"Research/Eigene Forschungsprojekte/Visible Learning/Thermometer.csv",
|
||||
]
|
||||
|
||||
# 2) Lokale Projekt-Pfade
|
||||
LOCAL_SPEC_PATHS = [
|
||||
os.path.join(PROJECT_DIR, "Thermometer.csv"),
|
||||
]
|
||||
|
||||
# 3) Frühere Kandidaten für Abwärtskompatibilität
|
||||
LEGACY_CANDIDATES = [
|
||||
"thermometer.csv",
|
||||
"thermometer_young_mapping.csv",
|
||||
"visible-learning_thermometer.csv",
|
||||
]
|
||||
|
||||
EXPORT_PATH = "Research/Eigene Forschungsprojekte/Visible Learning/export"
|
||||
|
||||
def _resolve_path(p: str) -> str:
|
||||
"""Erlaube relative Pfade (vom Script- oder Arbeitsverzeichnis) und absolute Pfade."""
|
||||
if os.path.isabs(p):
|
||||
return p
|
||||
# erst relativ zum Script-Verzeichnis
|
||||
p1 = os.path.join(SCRIPT_DIR, p)
|
||||
if os.path.exists(p1):
|
||||
return p1
|
||||
# dann relativ zum aktuellen Arbeitsverzeichnis
|
||||
p2 = os.path.abspath(p)
|
||||
return p2
|
||||
|
||||
|
||||
def _load_csv():
|
||||
tried = []
|
||||
# 1) Nutzerpfad(e)
|
||||
for raw in USER_SPEC_PATHS:
|
||||
p = _resolve_path(raw)
|
||||
if os.path.exists(p):
|
||||
try:
|
||||
df = pd.read_csv(p)
|
||||
return df, p
|
||||
except Exception as e:
|
||||
tried.append((p, str(e)))
|
||||
# 2) Lokale Projektpfade
|
||||
for raw in LOCAL_SPEC_PATHS:
|
||||
p = _resolve_path(raw)
|
||||
if os.path.exists(p):
|
||||
try:
|
||||
df = pd.read_csv(p)
|
||||
return df, p
|
||||
except Exception as e:
|
||||
tried.append((p, str(e)))
|
||||
# 3) Legacy-Kandidaten
|
||||
for raw in LEGACY_CANDIDATES:
|
||||
p = _resolve_path(raw)
|
||||
if os.path.exists(p):
|
||||
try:
|
||||
df = pd.read_csv(p)
|
||||
return df, p
|
||||
except Exception as e:
|
||||
tried.append((p, str(e)))
|
||||
|
||||
msg_lines = ["Keine CSV gefunden. Bitte stelle sicher, dass eine Thermometer-CSV existiert."]
|
||||
msg_lines.append("Versuchte Pfade:")
|
||||
for p,e in tried:
|
||||
msg_lines.append(f" - {p} (Fehler: {e})")
|
||||
msg_lines.append("Gesucht wurden u.a. (Rangfolge):")
|
||||
for raw in USER_SPEC_PATHS + LOCAL_SPEC_PATHS + LEGACY_CANDIDATES:
|
||||
msg_lines.append(f" - {_resolve_path(raw)}")
|
||||
raise FileNotFoundError("\n".join(msg_lines))
|
||||
|
||||
# --------------------------------------------
|
||||
# WERTEDATEI (nur Zusatzfelder)
|
||||
# --------------------------------------------
|
||||
EXTRA_COLS = ["Effekt_abs", "Effekt_sign", "Effekt_norm01"]
|
||||
|
||||
def _build_extrafields(df: pd.DataFrame) -> pd.DataFrame:
|
||||
d = df.copy()
|
||||
d["Effekt_abs"] = d["Effektstärke"].abs()
|
||||
d["Effekt_sign"] = d["Effektstärke"].apply(lambda x: 1 if x > 0 else (-1 if x < 0 else 0))
|
||||
if d["Effekt_abs"].max() > 0:
|
||||
d["Effekt_norm01"] = d["Effekt_abs"] / d["Effekt_abs"].max()
|
||||
else:
|
||||
d["Effekt_norm01"] = 0.0
|
||||
return d[EXTRA_COLS]
|
||||
|
||||
# -----------------------------------------------------------
|
||||
# Merge DataFrame with extras and export as CSV
|
||||
# -----------------------------------------------------------
|
||||
def _merge_and_export(df: pd.DataFrame, extras: pd.DataFrame):
|
||||
merged = pd.concat([df[["Thermometer_ID", "Stichwort", "Effektstärke", "Subkapitel", "Kapitelname", "Systemebene", "Young_Beduerfnis"]].reset_index(drop=True), extras.reset_index(drop=True)], axis=1)
|
||||
os.makedirs(EXPORT_PATH, exist_ok=True)
|
||||
out_path = os.path.join(EXPORT_PATH, "werte_mapping.csv")
|
||||
merged.to_csv(out_path, index=False, encoding="utf-8")
|
||||
print(f"✅ Mapping + Zusatzwerte exportiert: {out_path}")
|
||||
|
||||
def _export_csv(df: pd.DataFrame, filename: str):
|
||||
os.makedirs(EXPORT_PATH, exist_ok=True)
|
||||
out_path = os.path.join(EXPORT_PATH, filename)
|
||||
df.to_csv(out_path, index=False, encoding="utf-8")
|
||||
print(f"✅ Zusatz-Wertedatei exportiert: {out_path}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
df, src = _load_csv()
|
||||
print(f"Eingelesen von: {src}")
|
||||
extras = _build_extrafields(df)
|
||||
_export_csv(extras, "werte_extras.csv")
|
||||
|
||||
_merge_and_export(df, extras)
|
||||
Reference in New Issue
Block a user