Compare commits
8 Commits
load_forec ... forecaster
| Author | SHA1 | Date |
|---|---|---|
| | afbcd81310 | |
| | fd87257f37 | |
| | 876115cf6e | |
| | f0e7c1338b | |
| | 8642a057f0 | |
| | ce14d59d51 | |
| | 4727364048 | |
| | 666eb211a3 | |
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,7 @@
from heat_pump import HeatPump

hp_master = HeatPump(device_name='hp_master', ip_address='10.0.0.10', port=502, excel_path="../modbus_registers/heat_pump_registers.xlsx")

state = hp_master.get_state()

print(state)
49
component_test_connectors/heat_pump_connection_sg_ready.py
Normal file
@@ -0,0 +1,49 @@
from pymodbus.client import ModbusTcpClient


def switch_sg_ready_mode(ip, port, mode):
    """
    Register 300: 1 = BUS, 0 = hardware contacts
    Register 301 & 302:
        0-0 = no offset
        0-1 = boiler and heating offset
        1-1 = boiler offset + electric heating element setpoint raised
        1-0 = SG utility (EVU) lock
    :param ip:
    :param mode:
        'mode1' = [True, False, False] => SG Ready deactivated
        'mode2' = [True, False, True]  => SG Ready activated for heat pump only
        'mode3' = [True, True, True]   => SG Ready activated for heat pump and heat rod
    :return:
    """
    client = ModbusTcpClient(ip, port=port)
    if not client.connect():
        print("Verbindung zur Wärmepumpe fehlgeschlagen.")
        return

    mode_code = None
    if mode == 'mode1':
        mode_code = [True, False, False]
    elif mode == 'mode2':
        mode_code = [True, False, True]
    elif mode == 'mode3':
        mode_code = [True, True, True]
    else:
        print('Incorrect or no string for mode!')
        client.close()
        return

    try:
        response_300 = client.write_coil(300, mode_code[0])
        response_301 = client.write_coil(301, mode_code[1])
        response_302 = client.write_coil(302, mode_code[2])

        # Optionally check the responses
        for addr, resp in zip([300, 301, 302], [response_300, response_301, response_302]):
            if resp.isError():
                print(f"Fehler beim Schreiben von Coil {addr}: {resp}")
            else:
                print(f"Coil {addr} erfolgreich geschrieben.")

    finally:
        client.close()


if __name__ == '__main__':
    switch_sg_ready_mode(ip='10.0.0.10', port=502, mode='mode2')
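The docstring above spells out the SG Ready coil mapping (coil 300 selects bus vs. hardware contacts, coils 301/302 select the offset combination). As a minimal sketch of the reverse direction, the snippet below reads the three coils back and maps them onto the same mode names; the helper name read_sg_ready_mode and the use of read_coils here are assumptions for illustration, not part of this commit.

from pymodbus.client import ModbusTcpClient

# Hypothetical helper: read coils 300-302 and report which 'modeX' they correspond to.
def read_sg_ready_mode(ip, port=502):
    client = ModbusTcpClient(ip, port=port)
    if not client.connect():
        print("Verbindung zur Wärmepumpe fehlgeschlagen.")
        return None
    try:
        rr = client.read_coils(300, count=3)  # coils 300, 301, 302
        if rr.isError():
            print(f"Fehler beim Lesen der SG-Ready-Coils: {rr}")
            return None
        bits = tuple(rr.bits[:3])  # (bus_enabled, offset_bit_1, offset_bit_2)
        known = {
            (True, False, False): 'mode1',  # SG Ready deactivated
            (True, False, True): 'mode2',   # heat pump only
            (True, True, True): 'mode3',    # heat pump and heat rod
        }
        return known.get(bits, f"unknown {list(bits)}")
    finally:
        client.close()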
213
data_base_operations/transform_old_db_to_new.py
Normal file
@@ -0,0 +1,213 @@
import os, re, math, time
from datetime import datetime, timezone, timedelta

import pandas as pd
from influxdb_client import InfluxDBClient, Point, WritePrecision
from influxdb_client.client.write_api import SYNCHRONOUS
from influxdb_client.rest import ApiException


# -----------------------
# CONFIG
# -----------------------
INFLUX_URL = "http://192.168.1.146:8086"
INFLUX_ORG = "allmende"
INFLUX_TOKEN = os.environ.get("INFLUX_TOKEN", "Cw_naEZyvJ3isiAh1P4Eq3TsjcHmzzDFS7SlbKDsS6ZWL04fMEYixWqtNxGThDdG27S9aW5g7FP9eiq5z1rsGA==")

SOURCE_BUCKET = "allmende_db"
TARGET_BUCKET = "allmende_db_v2"

MEASUREMENTS = [
    "hp_master", "hp_slave", "pv_forecast", "sg_ready",
    "solaredge_master", "solaredge_meter", "solaredge_slave", "wohnung_2_6"
]

START_DT = datetime(2025, 6, 1, tzinfo=timezone.utc)
STOP_DT = datetime.now(timezone.utc)
WINDOW = timedelta(days=1)

EXCEL_PATH = "../modbus_registers/heat_pump_registers.xlsx"
EXCEL_SHEET = "Register_Map"

BATCH_SIZE = 1000
MAX_RETRIES = 8


# -----------------------
# Helpers
# -----------------------
def normalize(s) -> str:
    s = "" if s is None else str(s).strip()
    return re.sub(r"\s+", " ", s)

def is_invalid_sentinel(v: float) -> bool:
    return v in (-999.9, -999.0, 30000.0, 32767.0, 65535.0)

def ensure_bucket(client: InfluxDBClient, name: str):
    bapi = client.buckets_api()
    if bapi.find_bucket_by_name(name):
        return
    bapi.create_bucket(bucket_name=name, org=INFLUX_ORG, retention_rules=None)

def build_field_type_map_from_excel(path: str) -> dict[str, str]:
    df = pd.read_excel(path, sheet_name=EXCEL_SHEET)
    df = df[df["Register_Type"].astype(str).str.upper() == "IR"].copy()
    df["Address"] = df["Address"].astype(int)
    df["Description"] = df["Description"].fillna("").astype(str)
    df["Tag_Name"] = df["Tag_Name"].fillna("").astype(str)
    df["Data_Type"] = df["Data_Type"].fillna("").astype(str)

    m: dict[str, str] = {}
    for _, r in df.iterrows():
        addr = int(r["Address"])
        desc = normalize(r["Description"])
        tag = normalize(r["Tag_Name"])
        dtp = normalize(r["Data_Type"]).upper()
        if tag:
            m[tag] = dtp
        old_key = normalize(f"{addr} - {desc}".strip(" -"))
        if old_key:
            m[old_key] = dtp
    return m

def coerce_value_to_dtype(v, dtype: str):
    if v is None:
        return None
    dtp = (dtype or "").upper()

    if isinstance(v, (int, float)):
        fv = float(v)
        if math.isnan(fv) or math.isinf(fv):
            return None

    if dtp in ("BOOL", "BOOLEAN"):
        if isinstance(v, bool): return v
        if isinstance(v, (int, float)): return bool(int(v))
        return None

    if dtp.startswith("INT") or dtp.startswith("UINT"):
        if isinstance(v, bool): return int(v)
        if isinstance(v, (int, float)): return int(float(v))
        return None

    if dtp.startswith("FLOAT") or dtp in ("DOUBLE",):
        if isinstance(v, bool): return float(int(v))
        if isinstance(v, (int, float)): return float(v)
        return None

    return None

def write_with_retry(write_api, batch):
    delay = 1.0
    last_msg = ""
    for _ in range(MAX_RETRIES):
        try:
            write_api.write(bucket=TARGET_BUCKET, org=INFLUX_ORG, record=batch)
            return
        except ApiException as e:
            last_msg = getattr(e, "body", "") or str(e)
            status = getattr(e, "status", None)
            if "timeout" in last_msg.lower() or status in (429, 500, 502, 503, 504):
                time.sleep(delay)
                delay = min(delay * 2, 30)
                continue
            raise
    raise RuntimeError(f"Write failed after {MAX_RETRIES} retries: {last_msg}")

def window_already_migrated(query_api, measurement: str, start: datetime, stop: datetime) -> bool:
    # Check: does the target bucket already contain at least one point in this window?
    flux = f'''
    from(bucket: "{TARGET_BUCKET}")
      |> range(start: time(v: "{start.isoformat()}"), stop: time(v: "{stop.isoformat()}"))
      |> filter(fn: (r) => r._measurement == "{measurement}")
      |> limit(n: 1)
    '''
    tables = query_api.query(flux, org=INFLUX_ORG)
    for t in tables:
        if t.records:
            return True
    return False

def migrate_window(query_api, write_api, measurement: str,
                   start: datetime, stop: datetime,
                   type_map: dict[str, str],
                   do_type_cast: bool) -> int:
    flux = f'''
    from(bucket: "{SOURCE_BUCKET}")
      |> range(start: time(v: "{start.isoformat()}"), stop: time(v: "{stop.isoformat()}"))
      |> filter(fn: (r) => r._measurement == "{measurement}")
      |> keep(columns: ["_time","_measurement","_field","_value"])
    '''
    tables = query_api.query(flux, org=INFLUX_ORG)

    batch, written = [], 0
    for table in tables:
        for rec in table.records:
            t = rec.get_time()
            field = normalize(rec.get_field())
            value = rec.get_value()
            if value is None:
                continue

            if do_type_cast:
                dtp = type_map.get(field)
                if dtp:
                    cv = coerce_value_to_dtype(value, dtp)
                    if cv is None:
                        continue
                    if isinstance(cv, (int, float)) and is_invalid_sentinel(float(cv)):
                        continue
                    value = cv
                # no mapping -> write the value unchanged

            batch.append(Point(measurement).field(field, value).time(t, WritePrecision.NS))

            if len(batch) >= BATCH_SIZE:
                write_with_retry(write_api, batch)
                written += len(batch)
                batch = []

    if batch:
        write_with_retry(write_api, batch)
        written += len(batch)

    return written


# -----------------------
# Main
# -----------------------
def main():
    if not INFLUX_TOKEN:
        raise RuntimeError("INFLUX_TOKEN fehlt (Env-Var INFLUX_TOKEN setzen).")

    with InfluxDBClient(url=INFLUX_URL, token=INFLUX_TOKEN, org=INFLUX_ORG, timeout=900_000) as client:
        ensure_bucket(client, TARGET_BUCKET)

        type_map = build_field_type_map_from_excel(EXCEL_PATH)
        query_api = client.query_api()
        write_api = client.write_api(write_options=SYNCHRONOUS)

        for meas in MEASUREMENTS:
            do_cast = meas in ("hp_master", "hp_slave")
            cur, total = START_DT, 0
            print(f"\n== {meas} (cast={'ON' if do_cast else 'OFF'}) ==")

            while cur < STOP_DT:
                nxt = min(cur + WINDOW, STOP_DT)

                if window_already_migrated(query_api, meas, cur, nxt):
                    print(f"{cur.isoformat()} -> {nxt.isoformat()} : SKIP (existiert schon)")
                    cur = nxt
                    continue

                n = migrate_window(query_api, write_api, meas, cur, nxt, type_map, do_cast)
                total += n
                print(f"{cur.isoformat()} -> {nxt.isoformat()} : {n} (gesamt {total})")
                cur = nxt

            print(f"== Fertig {meas}: {total} Punkte ==")


if __name__ == "__main__":
    main()
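The migration script above walks each measurement in one-day windows, skips windows that already contain data in the target bucket, and retries writes with exponential backoff. Below is a minimal dry-run sketch in the same spirit that only counts source points per window instead of writing them; it assumes the script's config constants can be imported as a module and is not part of this commit.

from influxdb_client import InfluxDBClient

# Hypothetical dry run: count points per daily window in the source bucket,
# reusing the constants defined in transform_old_db_to_new.py (assumed importable).
from transform_old_db_to_new import (INFLUX_URL, INFLUX_TOKEN, INFLUX_ORG,
                                     SOURCE_BUCKET, MEASUREMENTS, START_DT, STOP_DT, WINDOW)

def count_window(query_api, measurement, start, stop) -> int:
    flux = f'''
    from(bucket: "{SOURCE_BUCKET}")
      |> range(start: time(v: "{start.isoformat()}"), stop: time(v: "{stop.isoformat()}"))
      |> filter(fn: (r) => r._measurement == "{measurement}")
      |> count()
    '''
    total = 0
    for table in query_api.query(flux, org=INFLUX_ORG):
        for rec in table.records:
            total += int(rec.get_value())
    return total

if __name__ == "__main__":
    with InfluxDBClient(url=INFLUX_URL, token=INFLUX_TOKEN, org=INFLUX_ORG) as client:
        qa = client.query_api()
        for meas in MEASUREMENTS:
            cur = START_DT
            while cur < STOP_DT:
                nxt = min(cur + WINDOW, STOP_DT)
                print(f"{meas}: {cur.isoformat()} -> {nxt.isoformat()} : {count_window(qa, meas, cur, nxt)} points")
                cur = nxt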
Binary file not shown.
@@ -1,349 +0,0 @@
# load_forecaster.py
# -*- coding: utf-8 -*-
"""
LoadForecaster: builds a 36-hour forecast at 15-min resolution from InfluxDB data.

- Data source: InfluxDB (Flux query provided by user)
- Target: House load = M_AC_real - I_AC_real
- Frequency: 15 minutes (changeable via init)
- Model: Keras (LSTM by default, pluggable)
- Persistence: Saves model (H5) and scaler (joblib)

Usage (example):

    from load_forecaster import LoadForecaster
    import tensorflow as tf

    lf = LoadForecaster(
        url="http://localhost:8086",
        token="<YOUR_TOKEN>",
        org="<YOUR_ORG>",
        bucket="allmende_db",
        agg_every="15m",
        input_hours=72,
        output_hours=36,
        model_path="model/load_forecaster.h5",
        scaler_path="model/scaler.joblib",
    )

    # Train or retrain
    lf.train_and_save(train_days=90, epochs=60)

    # Load model and forecast
    model = lf.load_model()
    forecast_df = lf.get_15min_forecast(model)
    print(forecast_df.head())
"""
from __future__ import annotations
import os
import math
import json
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple

import numpy as np
import pandas as pd
from influxdb_client import InfluxDBClient
from influxdb_client.client.warnings import MissingPivotFunction
from sklearn.preprocessing import StandardScaler
from sklearn.exceptions import NotFittedError
import joblib

# TensorFlow / Keras
import tensorflow as tf
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import LSTM, Dense, Dropout
from tensorflow.keras.callbacks import EarlyStopping

warnings.filterwarnings("ignore", category=MissingPivotFunction)


@dataclass
class InfluxParams:
    url: str
    token: str
    org: str
    bucket: str = "allmende_db"


class LoadForecaster:
    def __init__(
        self,
        url: str,
        token: str,
        org: str,
        bucket: str = "allmende_db",
        agg_every: str = "15m",
        input_hours: int = 72,
        output_hours: int = 36,
        model_path: str = "model/load_forecaster.h5",
        scaler_path: str = "model/scaler.joblib",
        feature_config: Optional[dict] = None,
    ) -> None:
        self.influx = InfluxParams(url=url, token=token, org=org, bucket=bucket)
        self.agg_every = agg_every
        self.input_steps = int((input_hours * 60) / self._freq_minutes(agg_every))
        self.output_steps = int((output_hours * 60) / self._freq_minutes(agg_every))
        self.model_path = model_path
        self.scaler_path = scaler_path
        self.feature_config = feature_config or {"use_temp": True, "use_time_cyc": True}
        self._scaler: Optional[StandardScaler] = None

        # Ensure model dir exists
        os.makedirs(os.path.dirname(model_path), exist_ok=True)

    # ---------------------------- Public API ---------------------------- #
    def get_15min_forecast(self, model: tf.keras.Model) -> pd.DataFrame:
        """Create a 36-hour forecast at 15-min resolution using the latest data.
        Assumes a StandardScaler has been fitted during training and saved.
        The method uses the most recent input window from InfluxDB.
        """
        # Pull just enough history for one input window
        history_hours = math.ceil(self.input_steps * self._freq_minutes(self.agg_every) / 60)
        df = self._query_and_prepare(range_hours=history_hours)
        if len(df) < self.input_steps:
            raise RuntimeError(f"Not enough data: need {self.input_steps} steps, got {len(df)}")

        # Build features for the latest window
        feats = self._build_features(df)
        X_window = feats[-self.input_steps :]

        # Load scaler
        scaler = self._load_or_get_scaler()
        X_scaled = scaler.transform(X_window)

        # Predict
        pred_scaled = model.predict(X_scaled[np.newaxis, ...], verbose=0)[0]

        # Inverse transform only the target column (index 0 is Load)
        # Reconstruct a full array to inverse_transform
        inv = np.zeros((self.output_steps, X_scaled.shape[1]))
        inv[:, 0] = pred_scaled
        inv_full = scaler.inverse_transform(inv)
        y_pred = inv_full[:, 0]

        # Build forecast index
        last_ts = df.index[-1]
        freq = pd.tseries.frequencies.to_offset(self.agg_every)
        idx = pd.date_range(last_ts + freq, periods=self.output_steps, freq=freq)
        out = pd.DataFrame({"Forecast_Load": y_pred}, index=idx)
        out.index.name = "timestamp"
        return out

    def train_and_save(
        self,
        train_days: int = 90,
        epochs: int = 80,
        batch_size: int = 128,
        validation_split: float = 0.2,
        learning_rate: float = 1e-3,
        fine_tune: bool = False,
    ) -> tf.keras.Model:
        """Train (or fine-tune) a model from recent history and persist model + scaler."""
        df = self._query_and_prepare(range_hours=24 * train_days)
        feats = self._build_features(df)

        # Prepare windows
        X, y = self._make_windows(feats)
        if len(X) < 10:
            raise RuntimeError("Not enough windowed samples to train.")

        # Fit scaler on full X
        scaler = StandardScaler()
        X_scaled = scaler.fit_transform(X)
        self._scaler = scaler
        joblib.dump(scaler, self.scaler_path)

        # Build model (or load existing for fine-tune)
        if fine_tune and os.path.exists(self.model_path):
            model = load_model(self.model_path)
        else:
            model = self._build_default_model(input_dim=X.shape[1], output_dim=self.output_steps, lr=learning_rate)

        # Train
        es = EarlyStopping(monitor="val_loss", patience=10, restore_best_weights=True)
        model.fit(
            X_scaled.reshape((-1, self.input_steps, X.shape[1] // self.input_steps)),
            y,
            epochs=epochs,
            batch_size=batch_size,
            validation_split=validation_split,
            callbacks=[es],
            verbose=1,
        )

        model.save(self.model_path)
        return model

    # A convenience wrapper to be called from an external script once per day
    def retrain_daily(self, train_days: int = 90, epochs: int = 40, fine_tune: bool = True) -> None:
        self.train_and_save(train_days=train_days, epochs=epochs, fine_tune=fine_tune)

    def load_model(self) -> tf.keras.Model:
        if not os.path.exists(self.model_path):
            raise FileNotFoundError(f"Model not found at {self.model_path}")
        return load_model(self.model_path)

    # ------------------------- Internals: Data ------------------------- #
    def _query_and_prepare(self, range_hours: int) -> pd.DataFrame:
        """Query InfluxDB for the last `range_hours` and construct the Load series.
        Expected fields (exactly as in DB):
        - "40206 - M_AC_Power"
        - "40210 - M_AC_Power_SF"
        - "40083 - I_AC_Power"
        - "40084 - I_AC_Power_SF"
        - "300 - Aussentemperatur"
        """
        start_str = f"-{range_hours}h"
        flux = f'''
        from(bucket: "{self.influx.bucket}")
          |> range(start: {start_str})
          |> filter(fn: (r) => r["_measurement"] == "solaredge_meter" or r["_measurement"] == "solaredge_master" or r["_measurement"] == "hp_master")
          |> filter(fn: (r) => r["_field"] == "40206 - M_AC_Power" or r["_field"] == "40210 - M_AC_Power_SF" or r["_field"] == "40083 - I_AC_Power" or r["_field"] == "40084 - I_AC_Power_SF" or r["_field"] == "300 - Aussentemperatur")
          |> aggregateWindow(every: {self.agg_every}, fn: mean, createEmpty: false)
          |> yield(name: "mean")
        '''
        with InfluxDBClient(url=self.influx.url, token=self.influx.token, org=self.influx.org) as client:
            tables = client.query_api().query_data_frame(flux)

        # Concatenate if list of frames is returned
        if isinstance(tables, list):
            df = pd.concat(tables, ignore_index=True)
        else:
            df = tables

        # Keep relevant columns and pivot
        df = df[["_time", "_field", "_value"]]
        df = df.pivot(index="_time", columns="_field", values="_value").reset_index()
        df = df.rename(
            columns={
                "_time": "timestamp",
                "40206 - M_AC_Power": "M_AC",
                "40210 - M_AC_Power_SF": "M_SF",
                "40083 - I_AC_Power": "I_AC",
                "40084 - I_AC_Power_SF": "I_SF",
                "300 - Aussentemperatur": "Temp",
            }
        )
        df = df.sort_values("timestamp").set_index("timestamp")

        # Forward-fill reasonable gaps (e.g., scaler factors and temp)
        df[["M_SF", "I_SF", "Temp"]] = df[["M_SF", "I_SF", "Temp"]].ffill()

        # Apply scaling: real = value * 10^sf
        df["I_AC_real"] = df["I_AC"] * np.power(10.0, df["I_SF"]).astype(float)
        df["M_AC_real"] = df["M_AC"] * np.power(10.0, df["M_SF"]).astype(float)

        # Compute load
        df["Load"] = df["M_AC_real"] - df["I_AC_real"]

        # Ensure regular 15-min grid
        df = df.asfreq(self.agg_every)
        df[["Load", "Temp"]] = df[["Load", "Temp"]].interpolate(limit_direction="both")

        return df[["Load", "Temp"]]

    def _build_features(self, df: pd.DataFrame) -> np.ndarray:
        """Create feature matrix: [Load, Temp?, sin/cos day, sin/cos dow]."""
        feats = [df["Load"].values.reshape(-1, 1)]

        if self.feature_config.get("use_temp", True):
            feats.append(df["Temp"].values.reshape(-1, 1))

        if self.feature_config.get("use_time_cyc", True):
            idx = df.index
            minute_of_day = (idx.hour * 60 + idx.minute).values.astype(float)
            sod = 2 * np.pi * minute_of_day / (24 * 60)
            dow = 2 * np.pi * idx.dayofweek.values.astype(float) / 7.0
            feats.append(np.sin(sod).reshape(-1, 1))
            feats.append(np.cos(sod).reshape(-1, 1))
            feats.append(np.sin(dow).reshape(-1, 1))
            feats.append(np.cos(dow).reshape(-1, 1))

        X = np.hstack(feats)  # shape: (T, n_features)

        # Flatten windows to 2D for scaler fitting, but model expects 3D; we reshape later
        return X

    def _make_windows(self, X_2d: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Create sliding windows: returns (X_flat, y) where X_flat stacks the windowed features.
        For Keras we later reshape X_flat -> (N, input_steps, n_features).
        """
        n = X_2d.shape[0]
        n_features = X_2d.shape[1]
        X_list, y_list = [], []
        for i in range(n - self.input_steps - self.output_steps):
            xw = X_2d[i : i + self.input_steps, :]
            yw = X_2d[i + self.input_steps : i + self.input_steps + self.output_steps, 0]  # target: Load
            X_list.append(xw.reshape(-1))  # flatten
            y_list.append(yw)
        X_flat = np.stack(X_list)
        y = np.stack(y_list)
        return X_flat, y

    # ----------------------- Internals: Modeling ----------------------- #
    def _build_default_model(self, input_dim: int, output_dim: int, lr: float = 1e-3) -> tf.keras.Model:
        n_features = input_dim // self.input_steps
        model = Sequential([
            LSTM(96, input_shape=(self.input_steps, n_features), return_sequences=False),
            Dropout(0.1),
            Dense(output_dim)
        ])
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr), loss="mse")
        return model

    def _load_or_get_scaler(self) -> StandardScaler:
        if self._scaler is not None:
            return self._scaler
        if not os.path.exists(self.scaler_path):
            raise NotFittedError("Scaler not found. Train the model first to create scaler.")
        self._scaler = joblib.load(self.scaler_path)
        return self._scaler

    @staticmethod
    def _freq_minutes(spec: str) -> int:
        # supports formats like "15m", "1h"
        if spec.endswith("m"):
            return int(spec[:-1])
        if spec.endswith("h"):
            return int(spec[:-1]) * 60
        raise ValueError(f"Unsupported frequency spec: {spec}")


# ----------------------------- retrain_daily.py -----------------------------
# A tiny script you can run once per day (e.g., via cron/systemd) to retrain the model.
# It delegates the work to LoadForecaster.retrain_daily().

if __name__ == "__main__":
    # Read credentials/config from env vars or fill here
    URL = os.getenv("INFLUX_URL", "http://localhost:8086")
    TOKEN = os.getenv("INFLUX_TOKEN", "<YOUR_TOKEN>")
    ORG = os.getenv("INFLUX_ORG", "<YOUR_ORG>")
    BUCKET = os.getenv("INFLUX_BUCKET", "allmende_db")

    lf = LoadForecaster(
        url=URL,
        token=TOKEN,
        org=ORG,
        bucket=BUCKET,
        agg_every="15m",
        input_hours=72,
        output_hours=36,
        model_path=os.getenv("FORECASTER_MODEL", "model/load_forecaster.h5"),
        scaler_path=os.getenv("FORECASTER_SCALER", "model/scaler.joblib"),
    )

    # One call per day is enough; decrease epochs for faster daily updates
    lf.retrain_daily(train_days=int(os.getenv("TRAIN_DAYS", "120")), epochs=int(os.getenv("EPOCHS", "30")), fine_tune=True)

    # Optionally, produce a fresh forecast right after training
    try:
        model = lf.load_model()
        fc = lf.get_15min_forecast(model)
        # Save latest forecast to CSV for dashboards/consumers
        out_path = os.getenv("FORECAST_OUT", "model/latest_forecast_15min.csv")
        os.makedirs(os.path.dirname(out_path), exist_ok=True)
        fc.to_csv(out_path)
        print(f"Saved forecast: {out_path}")
    except Exception as e:
        print(f"Forecast generation failed: {e}")
199
heat_pump.py
@@ -1,64 +1,173 @@
 from pymodbus.client import ModbusTcpClient
 import pandas as pd
 import time
+import struct
+import math


 class HeatPump:
-    def __init__(self, device_name: str, ip_address: str, port: int=502):
+    def __init__(self, device_name: str, ip_address: str, port: int = 502,
+                 excel_path: str = "modbus_registers/heat_pump_registers.xlsx",
+                 sheet_name: str = "Register_Map"):
         self.device_name = device_name
         self.ip = ip_address
         self.port = port
-        self.client = None
-        self.connect_to_modbus()
-        self.registers = None
-        self.get_registers()
-
-    def connect_to_modbus(self):
-        port = self.port
-        self.client = ModbusTcpClient(self.ip, port=port)
+        self.client = ModbusTcpClient(self.ip, port=self.port)
+        self.excel_path = excel_path
+        self.sheet_name = sheet_name
+        self.registers = self.get_registers()
+
+    # -------------
+    # Connection
+    # -------------
+    def connect(self) -> bool:
+        ok = self.client.connect()
+        if not ok:
+            print("Verbindung zur Wärmepumpe fehlgeschlagen.")
+        return ok
+
+    def close(self):
         try:
-            if not self.client.connect():
-                print("Verbindung zur Wärmepumpe fehlgeschlagen.")
-                exit(1)
-            print("Verbindung zur Wärmepumpe erfolgreich.")
-        except KeyboardInterrupt:
-            print("Beendet durch Benutzer (Ctrl+C).")
-        finally:
             self.client.close()
+        except Exception:
+            pass

-    def get_registers(self):
-        # Excel file with the input register information
-        excel_path = "modbus_registers/heat_pump_registers.xlsx"
-        xls = pd.ExcelFile(excel_path)
-        df_input_registers = xls.parse('04 Input Register')
-
-        # Clean up the relevant columns
-        df_clean = df_input_registers[['MB Adresse', 'Variable', 'Beschreibung', 'Variabel Typ']].dropna()
-        df_clean['MB Adresse'] = df_clean['MB Adresse'].astype(int)
-
-        # Build the dictionary from the Excel sheet
-        self.registers = {
-            row['MB Adresse']: {
-                'desc': row['Beschreibung'],
-                'type': 'REAL' if row['Variabel Typ'] == 'REAL' else 'INT'
-            }
-            for _, row in df_clean.iterrows()
-        }
+    # -------------
+    # Excel parsing
+    # -------------
+    def get_registers(self) -> dict:
+        df = pd.read_excel(self.excel_path, sheet_name=self.sheet_name)
+        df = df[df["Register_Type"].astype(str).str.upper() == "IR"].copy()
+        df["Address"] = df["Address"].astype(int)
+        df["Length"] = df["Length"].astype(int)
+        df["Data_Type"] = df["Data_Type"].astype(str).str.upper()
+        df["Byteorder"] = df["Byteorder"].astype(str).str.upper()
+        df["Scaling"] = df.get("Scaling", 1.0)
+        df["Scaling"] = df["Scaling"].fillna(1.0).astype(float)
+        df["Offset"] = df.get("Offset", 0.0)
+        df["Offset"] = df["Offset"].fillna(0.0).astype(float)
+
+        regs = {}
+        for _, row in df.iterrows():
+            regs[int(row["Address"])] = {
+                "length": int(row["Length"]),
+                "data_type": row["Data_Type"],
+                "byteorder": row["Byteorder"],
+                "scaling": float(row["Scaling"]),
+                "offset": float(row["Offset"]),
+                "tag": str(row.get("Tag_Name", "")).strip(),
+                "desc": "" if pd.isna(row.get("Description")) else str(row.get("Description")).strip(),
+            }
+        return regs

-    def get_state(self):
-        data = {}
-        data['Zeit'] = time.strftime('%Y-%m-%d %H:%M:%S')
-        for address, info in self.registers.items():
-            reg_type = info['type']
-            result = self.client.read_input_registers(address, count=2 if reg_type == 'REAL' else 1)
-            if result.isError():
-                print(f"Fehler beim Lesen von Adresse {address}: {result}")
-                continue
-
-            if reg_type == 'REAL':
-                value = result.registers[0] / 10.0
-            else:
-                value = result.registers[0]
-
-            print(f"Adresse {address} - {info['desc']}: {value}")
-            data[f"{address} - {info['desc']}"] = value
+    # -------------
+    # Byteorder handling
+    # -------------
+    @staticmethod
+    def _registers_to_bytes(registers: list[int], byteorder_code: str) -> bytes:
+        """
+        registers: list of uint16 (0..65535), as returned by pymodbus.
+        byteorder_code: AB, ABCD, CDAB, BADC, DCBA (as defined in the template)
+        Returns: bytes in the order required by struct.unpack.
+        """
+        code = (byteorder_code or "ABCD").upper()
+
+        # Per register: 16 bit => two bytes (MSB, LSB)
+        words = [struct.pack(">H", r & 0xFFFF) for r in registers]  # big endian per word
+
+        if len(words) == 1:
+            w = words[0]  # b'\xAA\xBB'
+            if code in ("AB", "ABCD", "CDAB"):
+                return w
+            if code == "BADC":  # byte swap
+                return w[::-1]
+            if code == "DCBA":  # byte swap (identical to BADC for 16-bit)
+                return w[::-1]
+            return w
+
+        # 32-bit (2 words) or 64-bit (4 words): map word/byte swaps
+        # words[0] = high word bytes, words[1] = low word bytes (read in Modbus order)
+        if code == "ABCD":
+            ordered = words
+        elif code == "CDAB":
+            # word swap
+            ordered = words[1:] + words[:1]
+        elif code == "BADC":
+            # byte swap within each word
+            ordered = [w[::-1] for w in words]
+        elif code == "DCBA":
+            # word + byte swap
+            ordered = [w[::-1] for w in (words[1:] + words[:1])]
+        else:
+            ordered = words
+
+        return b"".join(ordered)
+
+    @staticmethod
+    def _decode_by_type(raw_bytes: bytes, data_type: str):
+        dt = (data_type or "").upper()
+
+        # struct: > = big endian, < = little endian
+        # raw_bytes already arrives in the right order, so ">" is used consistently.
+        if dt == "UINT16":
+            return struct.unpack(">H", raw_bytes[:2])[0]
+        if dt == "INT16":
+            return struct.unpack(">h", raw_bytes[:2])[0]
+        if dt == "UINT32":
+            return struct.unpack(">I", raw_bytes[:4])[0]
+        if dt == "INT32":
+            return struct.unpack(">i", raw_bytes[:4])[0]
+        if dt == "FLOAT32":
+            return struct.unpack(">f", raw_bytes[:4])[0]
+        if dt == "FLOAT64":
+            return struct.unpack(">d", raw_bytes[:8])[0]
+
+        raise ValueError(f"Unbekannter Data_Type: {dt}")
+
+    def _decode_value(self, registers: list[int], meta: dict):
+        raw = self._registers_to_bytes(registers, meta["byteorder"])
+        val = self._decode_by_type(raw, meta["data_type"])
+        return (val * meta["scaling"]) + meta["offset"]
+
+    # -------------
+    # Reading
+    # -------------
+    def get_state(self) -> dict:
+        data = {"Zeit": time.strftime("%Y-%m-%d %H:%M:%S")}
+
+        if not self.connect():
+            data["error"] = "connect_failed"
+            return data
+
+        try:
+            for address, meta in self.registers.items():
+                count = int(meta["length"])
+                result = self.client.read_input_registers(address, count=count)
+                if result.isError():
+                    print(f"Fehler beim Lesen von Adresse {address}: {result}")
+                    continue
+
+                try:
+                    value = self._decode_value(result.registers, meta)
+                except Exception as e:
+                    print(f"Decode-Fehler an Adresse {address} ({meta.get('tag','')}): {e}")
+                    continue
+
+                # Optional filter
+                # if self._is_invalid_sentinel(value):
+                #     continue
+                value = float(value)
+                desc = meta.get("desc") or ""
+                field_name = f"{address} - {desc}".strip(" -")
+                data[field_name] = float(value)
+
+                print(f"Adresse {address} - {desc}: {value}")
+
+        finally:
+            self.close()
+
         return data
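The new _registers_to_bytes/_decode_by_type pair maps the byte-order codes (ABCD, CDAB, BADC, DCBA) onto struct unpacking. Below is a self-contained sketch of what those orderings do to a 32-bit float split across two Modbus registers; the register values are invented for illustration and the snippet is independent of the class above.

import struct

# Two 16-bit Modbus registers that together encode the FLOAT32 value 12.5
# when interpreted big-endian as ABCD: 0x4148 0x0000 -> b'\x41\x48\x00\x00'.
registers = [0x4148, 0x0000]
words = [struct.pack(">H", r) for r in registers]

orderings = {
    "ABCD": b"".join(words),                                     # as read
    "CDAB": b"".join(words[1:] + words[:1]),                     # word swap
    "BADC": b"".join(w[::-1] for w in words),                    # byte swap per word
    "DCBA": b"".join(w[::-1] for w in (words[1:] + words[:1])),  # word + byte swap
}

for code, raw in orderings.items():
    value = struct.unpack(">f", raw)[0]
    print(f"{code}: bytes={raw.hex()} -> {value}")

# Only the ordering that matches the device's actual byte order (here ABCD)
# recovers 12.5; the other codes decode the same registers to different numbers.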
2
main.py
@@ -23,7 +23,7 @@ db = DataBaseInflux(
     url="http://192.168.1.146:8086",
     token="Cw_naEZyvJ3isiAh1P4Eq3TsjcHmzzDFS7SlbKDsS6ZWL04fMEYixWqtNxGThDdG27S9aW5g7FP9eiq5z1rsGA==",
     org="allmende",
-    bucket="allmende_db"
+    bucket="allmende_db_v3"
 )

 hp_master = HeatPump(device_name='hp_master', ip_address='10.0.0.10', port=502)
BIN
modbus_registers/_modbus_register_template.xlsx
Normal file
Binary file not shown.
BIN
modbus_registers/raw_register_tables/heat_pump_registers.xlsx
Normal file
Binary file not shown.