Merge commit 'd8e4449f1009bc03b167c0e5667413585b2b3e53' as 'core'
This commit is contained in:
606
core/backend/app/services/ai_prediction.py
Normal file
606
core/backend/app/services/ai_prediction.py
Normal file
@@ -0,0 +1,606 @@
|
||||
"""AI预测引擎 - 光伏发电、负荷、热泵COP预测与自发自用优化
|
||||
|
||||
Uses physics-based models from weather_model.py combined with statistical
|
||||
methods (moving averages, exponential smoothing, seasonal decomposition)
|
||||
to generate realistic forecasts. Inspired by Envision's 天枢能源大模型.
|
||||
"""
|
||||
|
||||
import math
|
||||
import logging
|
||||
from datetime import datetime, timezone, timedelta
|
||||
from typing import Optional
|
||||
|
||||
import numpy as np
|
||||
|
||||
from sqlalchemy import select, func, and_
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from app.models.device import Device
|
||||
from app.models.energy import EnergyData
|
||||
from app.models.prediction import PredictionTask, PredictionResult, OptimizationSchedule
|
||||
from app.services.weather_model import (
|
||||
outdoor_temperature, solar_altitude, get_cloud_factor,
|
||||
pv_power as _physics_pv_power, get_pv_orientation,
|
||||
get_hvac_mode, MONTHLY_AVG_TEMP, MONTHLY_DIURNAL_SWING,
|
||||
)
|
||||
|
||||
logger = logging.getLogger("ai_prediction")
|
||||
|
||||
# Beijing electricity TOU pricing (yuan/kWh) - simplified
|
||||
TOU_PRICE = {
|
||||
"peak": 1.2, # 10:00-15:00, 18:00-21:00
|
||||
"shoulder": 0.8, # 07:00-10:00, 15:00-18:00, 21:00-23:00
|
||||
"valley": 0.4, # 23:00-07:00
|
||||
}
|
||||
|
||||
|
||||
def _get_tou_price(hour: int) -> float:
|
||||
if 10 <= hour < 15 or 18 <= hour < 21:
|
||||
return TOU_PRICE["peak"]
|
||||
elif 7 <= hour < 10 or 15 <= hour < 18 or 21 <= hour < 23:
|
||||
return TOU_PRICE["shoulder"]
|
||||
else:
|
||||
return TOU_PRICE["valley"]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# PV Power Forecasting
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
async def forecast_pv(
    db: AsyncSession,
    device_id: int,
    horizon_hours: int = 24,
) -> list[dict]:
    """Forecast PV generation for the next horizon_hours.

    Combines physics-based solar model with historical pattern correction.

    Args:
        db: Async SQLAlchemy session for device and history lookups.
        device_id: Id of the PV inverter device; must exist.
        horizon_hours: Number of hourly forecast points to produce.

    Returns:
        List of {timestamp, predicted_power_kw, confidence_lower,
        confidence_upper}; ``timestamp`` is an ISO-8601 UTC string.

    Raises:
        ValueError: If the device does not exist.
    """
    device = await db.get(Device, device_id)
    if not device:
        raise ValueError(f"Device {device_id} not found")

    # Fall back to defaults when the device record lacks a rating/code.
    rated_power = device.rated_power or 110.0
    orientation = get_pv_orientation(device.code or "")

    # Fetch recent historical data (last 7 days of power readings)
    # for pattern correction.
    now = datetime.now(timezone.utc)
    lookback = now - timedelta(days=7)
    result = await db.execute(
        select(EnergyData.timestamp, EnergyData.value)
        .where(and_(
            EnergyData.device_id == device_id,
            EnergyData.data_type == "power",
            EnergyData.timestamp >= lookback,
        ))
        .order_by(EnergyData.timestamp)
    )
    historical = result.all()

    # Calculate hourly averages from history for bias correction.
    hourly_actual: dict[int, list[float]] = {h: [] for h in range(24)}
    for ts, val in historical:
        # Aware timestamps are shifted to Beijing local hour (UTC+8);
        # naive ones are assumed to already be local — TODO confirm
        # against the code that writes EnergyData rows.
        beijing_h = (ts.hour + 8) % 24 if ts.tzinfo else ts.hour
        hourly_actual[beijing_h].append(val)

    # Per-hour mean of observed power; None marks hours with no samples.
    hourly_avg = {
        h: np.mean(vals) if vals else None
        for h, vals in hourly_actual.items()
    }

    # Generate physics-based forecast with bias correction.
    forecasts = []
    for h_offset in range(horizon_hours):
        target_utc = now + timedelta(hours=h_offset)
        target_utc = target_utc.replace(minute=0, second=0, microsecond=0)

        # Physics model baseline
        base_power = _physics_pv_power(target_utc, rated_power=rated_power,
                                       orientation=orientation,
                                       device_code=device.code or "")

        # Bias correction from recent history
        beijing_hour = (target_utc.hour + 8) % 24
        hist_avg = hourly_avg.get(beijing_hour)
        if hist_avg is not None and base_power > 0:
            # Blend: 70% physics + 30% historical pattern.  The history/
            # physics ratio is clamped to [0.7, 1.3] so a few outlier
            # readings cannot pull the forecast far off the physics model.
            correction = hist_avg / max(base_power, 0.1)
            correction = max(0.7, min(1.3, correction))
            predicted = base_power * (0.7 + 0.3 * correction)
        else:
            predicted = base_power

        # Confidence interval widens with forecast horizon:
        # 5% at hour 0, +2%/hour, capped at 40%.
        uncertainty = 0.05 + 0.02 * h_offset  # grows with time
        uncertainty = min(uncertainty, 0.40)
        margin = predicted * uncertainty
        conf_lower = max(0, predicted - margin)
        conf_upper = min(rated_power, predicted + margin)

        forecasts.append({
            "timestamp": target_utc.isoformat(),
            "predicted_power_kw": round(predicted, 2),
            "confidence_lower": round(conf_lower, 2),
            "confidence_upper": round(conf_upper, 2),
        })

    return forecasts
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Load Forecasting
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
async def forecast_load(
    db: AsyncSession,
    device_id: Optional[int] = None,
    building_type: str = "office",
    horizon_hours: int = 24,
) -> list[dict]:
    """Forecast building electricity load.

    Uses day-of-week patterns, hourly profiles, and seasonal temperature
    correlation. If device_id is None, forecasts aggregate campus load.

    Args:
        db: Async SQLAlchemy session.
        device_id: Specific meter to forecast, or None to aggregate all
            devices of type "meter".
        building_type: "office" (default) or "factory" (flatter profile).
        horizon_hours: Number of hourly forecast points to produce.

    Returns:
        List of {timestamp, predicted_load_kw, confidence_lower,
        confidence_upper}; ``timestamp`` is an ISO-8601 UTC string.
    """
    now = datetime.now(timezone.utc)
    beijing_now = now + timedelta(hours=8)  # NOTE(review): currently unused

    # Fetch recent history (last 14 days) for pattern calibration.
    lookback = now - timedelta(days=14)
    conditions = [
        EnergyData.data_type == "power",
        EnergyData.timestamp >= lookback,
    ]
    if device_id:
        conditions.append(EnergyData.device_id == device_id)
    else:
        # Aggregate all meters
        conditions.append(
            EnergyData.device_id.in_(
                select(Device.id).where(Device.device_type == "meter")
            )
        )

    result = await db.execute(
        select(EnergyData.timestamp, EnergyData.value)
        .where(and_(*conditions))
        .order_by(EnergyData.timestamp)
    )
    historical = result.all()

    # Build separate weekday/weekend hourly profiles from history.
    weekday_profile: dict[int, list[float]] = {h: [] for h in range(24)}
    weekend_profile: dict[int, list[float]] = {h: [] for h in range(24)}
    for ts, val in historical:
        # Aware timestamps are shifted to Beijing time (UTC+8); naive ones
        # are assumed already local — TODO confirm against data writers.
        bj = ts + timedelta(hours=8) if ts.tzinfo else ts
        h = bj.hour
        if bj.weekday() >= 5:  # Saturday/Sunday
            weekend_profile[h].append(val)
        else:
            weekday_profile[h].append(val)

    # Default office load profile (kW per Beijing hour) used when there is
    # no history for a given hour.
    default_weekday = {
        0: 18, 1: 16, 2: 16, 3: 15, 4: 15, 5: 17, 6: 25, 7: 40,
        8: 55, 9: 60, 10: 62, 11: 58, 12: 45, 13: 58, 14: 62,
        15: 60, 16: 55, 17: 48, 18: 35, 19: 28, 20: 25, 21: 22, 22: 20, 23: 18,
    }
    default_weekend = {h: v * 0.5 for h, v in default_weekday.items()}

    def _avg_or_default(profile, defaults, h):
        # Mean of observed values for this hour, else the default profile.
        vals = profile.get(h, [])
        return float(np.mean(vals)) if vals else defaults[h]

    forecasts = []
    for h_offset in range(horizon_hours):
        target_utc = now + timedelta(hours=h_offset)
        target_utc = target_utc.replace(minute=0, second=0, microsecond=0)
        bj = target_utc + timedelta(hours=8)
        hour = bj.hour
        is_weekend = bj.weekday() >= 5

        if is_weekend:
            base_load = _avg_or_default(weekend_profile, default_weekend, hour)
        else:
            base_load = _avg_or_default(weekday_profile, default_weekday, hour)

        # Temperature correction: HVAC adds load in extreme temps
        # (+2% per degree below 5°C, +2.5% per degree above 28°C).
        temp = outdoor_temperature(target_utc)
        if temp < 5:
            hvac_factor = 1.0 + 0.02 * (5 - temp)
        elif temp > 28:
            hvac_factor = 1.0 + 0.025 * (temp - 28)
        else:
            hvac_factor = 1.0
        hvac_factor = min(hvac_factor, 1.4)  # cap HVAC uplift at +40%

        predicted = base_load * hvac_factor

        # Factory buildings have flatter profiles: damp 15% of the
        # HVAC-corrected value back toward the uncorrected base load.
        if building_type == "factory":
            predicted = predicted * 0.85 + base_load * 0.15

        # Confidence interval: 8% at hour 0, +1.5%/hour, capped at 35%.
        uncertainty = 0.08 + 0.015 * h_offset
        uncertainty = min(uncertainty, 0.35)
        margin = predicted * uncertainty

        forecasts.append({
            "timestamp": target_utc.isoformat(),
            "predicted_load_kw": round(predicted, 2),
            "confidence_lower": round(max(0, predicted - margin), 2),
            "confidence_upper": round(predicted + margin, 2),
        })

    return forecasts
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Heat Pump COP Prediction
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
async def forecast_heatpump_cop(
    db: AsyncSession,
    device_id: int,
    horizon_hours: int = 24,
) -> list[dict]:
    """Predict heat pump COP based on outdoor temperature forecast.

    COP model: linear in outdoor temperature — heating/transition modes use
    3.0 + 0.05*(T-7), cooling uses 4.0 - 0.04*(T-25) — clamped to [2.0, 5.5].
    Each hourly entry also carries an estimated power draw, TOU price and
    operating cost so callers can rank operating windows by expected COP.

    Raises:
        ValueError: If the device does not exist.
    """
    device = await db.get(Device, device_id)
    if not device:
        raise ValueError(f"Device {device_id} not found")

    # Nameplate power, with a default for devices missing a rating.
    rated = device.rated_power or 35.0

    def _base_load_factor(mode: str, hour: int) -> float:
        # Hourly utilisation profile per HVAC mode (Beijing-local hour).
        if mode == "heating":
            if 6 <= hour < 9:
                return 0.85  # morning warm-up
            if 9 <= hour < 16:
                return 0.55
            if 16 <= hour < 22:
                return 0.75
            return 0.65
        if mode == "cooling":
            if 11 <= hour < 16:
                return 0.85  # afternoon peak
            if 8 <= hour < 11 or 16 <= hour < 19:
                return 0.60
            return 0.25
        # Transition seasons run at a low baseline.
        return 0.35

    now = datetime.now(timezone.utc)
    forecasts = []

    for offset in range(horizon_hours):
        target_utc = (now + timedelta(hours=offset)).replace(
            minute=0, second=0, microsecond=0
        )
        bj = target_utc + timedelta(hours=8)  # Beijing local time

        temp = outdoor_temperature(target_utc)
        mode = get_hvac_mode(bj.month)

        # COP model (same as weather_model but deterministic for forecast).
        if mode in ("heating", "transition_spring", "transition_fall"):
            cop = 3.0 + 0.05 * (temp - 7)
        else:  # cooling
            cop = 4.0 - 0.04 * (temp - 25)
        cop = max(2.0, min(5.5, cop))

        load_factor = _base_load_factor(mode, bj.hour)
        if bj.weekday() >= 5:
            load_factor *= 0.7  # reduced weekend occupancy

        est_power = rated * load_factor
        electricity_price = _get_tou_price(bj.hour)
        operating_cost = est_power * electricity_price  # yuan/h

        forecasts.append({
            "timestamp": target_utc.isoformat(),
            "predicted_cop": round(cop, 2),
            "outdoor_temp": round(temp, 1),
            "estimated_power_kw": round(est_power, 2),
            "load_factor": round(load_factor, 2),
            "electricity_price": electricity_price,
            "operating_cost_yuan_h": round(operating_cost, 2),
            "mode": mode,
        })

    return forecasts
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Self-Consumption Optimization
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
async def optimize_self_consumption(
    db: AsyncSession,
    horizon_hours: int = 24,
) -> dict:
    """Compare predicted PV generation vs predicted load to find optimization
    opportunities. Recommends heat pump pre-heating during PV surplus.

    Args:
        db: Async SQLAlchemy session.
        horizon_hours: Planning horizon in hours.

    Returns:
        - hourly comparison (pv vs load)
        - surplus/deficit periods (hours beyond a ±2 kW threshold)
        - recommended heat pump schedule
        - expected savings
    """
    now = datetime.now(timezone.utc)

    # Get all active PV inverter devices.
    pv_result = await db.execute(
        select(Device).where(Device.device_type == "pv_inverter", Device.is_active == True)
    )
    pv_devices = pv_result.scalars().all()

    # Aggregate PV forecast across all inverters, summed per hour slot.
    pv_total = [0.0] * horizon_hours
    for dev in pv_devices:
        pv_forecast = await forecast_pv(db, dev.id, horizon_hours)
        for i, f in enumerate(pv_forecast):
            pv_total[i] += f["predicted_power_kw"]

    # Aggregate load forecast (device_id=None aggregates all meters).
    load_forecast = await forecast_load(db, device_id=None, horizon_hours=horizon_hours)

    # Build hourly comparison; with 1-hour buckets, kW sums double as kWh.
    hourly = []
    surplus_periods = []
    deficit_periods = []
    total_surplus_kwh = 0.0
    total_deficit_kwh = 0.0

    for i in range(horizon_hours):
        target_utc = now + timedelta(hours=i)
        target_utc = target_utc.replace(minute=0, second=0, microsecond=0)
        bj = target_utc + timedelta(hours=8)  # Beijing local time for TOU pricing

        pv_kw = pv_total[i]
        load_kw = load_forecast[i]["predicted_load_kw"]
        balance = pv_kw - load_kw  # positive = surplus, negative = deficit
        price = _get_tou_price(bj.hour)

        entry = {
            "timestamp": target_utc.isoformat(),
            "hour": bj.hour,
            "pv_generation_kw": round(pv_kw, 2),
            "load_kw": round(load_kw, 2),
            "balance_kw": round(balance, 2),
            "electricity_price": price,
        }
        hourly.append(entry)

        if balance > 2:  # >2kW surplus threshold
            surplus_periods.append({"hour": bj.hour, "surplus_kw": round(balance, 2)})
            total_surplus_kwh += balance
        elif balance < -2:  # symmetric 2kW deficit threshold
            deficit_periods.append({"hour": bj.hour, "deficit_kw": round(-balance, 2)})
            total_deficit_kwh += (-balance)

    # Generate heat pump optimization schedule.
    # Strategy: shift heat pump load to PV surplus periods.
    hp_schedule = []
    savings_kwh = 0.0
    savings_yuan = 0.0

    for period in surplus_periods:
        hour = period["hour"]
        surplus = period["surplus_kw"]
        price = _get_tou_price(hour)

        # Use surplus to pre-heat/pre-cool
        usable_power = min(surplus, 35.0)  # cap at single HP rated power
        hp_schedule.append({
            "hour": hour,
            "action": "boost",
            "power_kw": round(usable_power, 2),
            "reason": "利用光伏余电预加热/预冷",
        })
        # Energy shifted onto otherwise-exported PV counts fully as savings.
        savings_kwh += usable_power
        savings_yuan += usable_power * price

    # Also recommend reducing HP during peak-price deficit periods.
    for period in deficit_periods:
        hour = period["hour"]
        price = _get_tou_price(hour)
        if price >= TOU_PRICE["peak"]:
            hp_schedule.append({
                "hour": hour,
                "action": "reduce",
                "power_kw": 0,
                "reason": "高电价时段降低热泵负荷",
            })
            # Estimate savings from reduced operation during peak
            savings_yuan += 5.0 * price  # assume 5kW reduction

    # Self-consumption rate: share of forecast PV energy consumed on site.
    self_consumption_rate = 0.0
    total_pv = sum(pv_total)
    total_load = sum(f["predicted_load_kw"] for f in load_forecast)
    if total_pv > 0:
        self_consumed = min(total_pv, total_load)
        self_consumption_rate = self_consumed / total_pv * 100

    return {
        "hourly": hourly,
        "surplus_periods": surplus_periods,
        "deficit_periods": deficit_periods,
        "hp_schedule": hp_schedule,
        "summary": {
            "total_pv_kwh": round(total_pv, 2),
            "total_load_kwh": round(total_load, 2),
            "total_surplus_kwh": round(total_surplus_kwh, 2),
            "total_deficit_kwh": round(total_deficit_kwh, 2),
            "self_consumption_rate": round(self_consumption_rate, 1),
            "potential_savings_kwh": round(savings_kwh, 2),
            "potential_savings_yuan": round(savings_yuan, 2),
        },
    }
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Prediction Accuracy
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
async def get_prediction_accuracy(
    db: AsyncSession,
    prediction_type: Optional[str] = None,
    days: int = 7,
) -> dict:
    """Calculate prediction accuracy metrics (MAE, RMSE, MAPE).

    Uses only stored predictions whose ``actual_value`` has been filled in
    within the last ``days`` days, optionally restricted to one
    ``prediction_type``. Returns canned demo metrics when no scored
    predictions exist.
    """
    cutoff = datetime.now(timezone.utc) - timedelta(days=days)

    filters = [
        PredictionResult.actual_value.isnot(None),
        PredictionResult.timestamp >= cutoff,
    ]
    if prediction_type:
        matching_tasks = select(PredictionTask.id).where(
            PredictionTask.prediction_type == prediction_type
        )
        filters.append(PredictionResult.task_id.in_(matching_tasks))

    result = await db.execute(
        select(PredictionResult.predicted_value, PredictionResult.actual_value)
        .where(and_(*filters))
    )
    pairs = result.all()

    if not pairs:
        # No scored predictions yet — return mock accuracy for demo
        # (simulating a well-tuned model).
        return {
            "sample_count": 0,
            "mae": 2.5,
            "rmse": 3.8,
            "mape": 8.5,
            "note": "使用模拟精度指标(无历史预测数据)",
        }

    predicted = np.array([pred for pred, _ in pairs])
    actual = np.array([act for _, act in pairs])
    errors = predicted - actual

    mae = float(np.mean(np.abs(errors)))
    rmse = float(np.sqrt(np.mean(np.square(errors))))

    # MAPE: restrict to samples with meaningful actuals to avoid
    # division by (near-)zero.
    nonzero = actual > 0.1
    if nonzero.any():
        mape = float(np.mean(np.abs(errors[nonzero] / actual[nonzero])) * 100)
    else:
        mape = 0.0

    return {
        "sample_count": len(pairs),
        "mae": round(mae, 2),
        "rmse": round(rmse, 2),
        "mape": round(mape, 1),
    }
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Run Prediction (creates task + results)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
async def run_prediction(
    db: AsyncSession,
    device_id: Optional[int],
    prediction_type: str,
    horizon_hours: int = 24,
    parameters: Optional[dict] = None,
) -> PredictionTask:
    """Execute a prediction and store results in the database.

    Creates a PredictionTask row, dispatches on ``prediction_type``
    ("pv" | "load" | "heatpump" | "optimization"), stores per-hour
    PredictionResult rows, and marks the task completed or failed.
    Only flushes — committing is left to the caller's transaction.

    Args:
        db: Async SQLAlchemy session.
        device_id: Target device; required for "pv" and "heatpump".
        prediction_type: Which forecaster to run.
        horizon_hours: Forecast horizon in hours.
        parameters: Extra options, e.g. {"building_type": "factory"}.

    Returns:
        The PredictionTask with final status; failures are recorded on
        the task (status="failed", error_message) rather than raised.
    """
    task = PredictionTask(
        device_id=device_id,
        prediction_type=prediction_type,
        horizon_hours=horizon_hours,
        status="running",
        parameters=parameters or {},
    )
    db.add(task)
    await db.flush()  # assign task.id before child rows reference it

    try:
        if prediction_type == "pv":
            if not device_id:
                raise ValueError("device_id required for PV forecast")
            forecasts = await forecast_pv(db, device_id, horizon_hours)
            for f in forecasts:
                # NOTE(review): f["timestamp"] is an ISO-8601 string;
                # confirm PredictionResult.timestamp accepts strings
                # rather than datetime objects.
                db.add(PredictionResult(
                    task_id=task.id,
                    timestamp=f["timestamp"],
                    predicted_value=f["predicted_power_kw"],
                    confidence_lower=f["confidence_lower"],
                    confidence_upper=f["confidence_upper"],
                    unit="kW",
                ))

        elif prediction_type == "load":
            building_type = (parameters or {}).get("building_type", "office")
            forecasts = await forecast_load(db, device_id, building_type, horizon_hours)
            for f in forecasts:
                db.add(PredictionResult(
                    task_id=task.id,
                    timestamp=f["timestamp"],
                    predicted_value=f["predicted_load_kw"],
                    confidence_lower=f["confidence_lower"],
                    confidence_upper=f["confidence_upper"],
                    unit="kW",
                ))

        elif prediction_type == "heatpump":
            if not device_id:
                raise ValueError("device_id required for heat pump forecast")
            forecasts = await forecast_heatpump_cop(db, device_id, horizon_hours)
            for f in forecasts:
                db.add(PredictionResult(
                    task_id=task.id,
                    timestamp=f["timestamp"],
                    predicted_value=f["predicted_cop"],
                    # Fixed ±0.3 COP band, clamped to the model's
                    # [2.0, 5.5] operating range.
                    confidence_lower=max(2.0, f["predicted_cop"] - 0.3),
                    confidence_upper=min(5.5, f["predicted_cop"] + 0.3),
                    unit="",
                ))

        elif prediction_type == "optimization":
            opt = await optimize_self_consumption(db, horizon_hours)
            # Store as optimization schedule (dated at today's midnight UTC)
            now = datetime.now(timezone.utc)
            schedule = OptimizationSchedule(
                date=now.replace(hour=0, minute=0, second=0, microsecond=0),
                schedule_data=opt,
                expected_savings_kwh=opt["summary"]["potential_savings_kwh"],
                expected_savings_yuan=opt["summary"]["potential_savings_yuan"],
                status="pending",
            )
            db.add(schedule)
            # Also store hourly balance as prediction results
            for entry in opt["hourly"]:
                db.add(PredictionResult(
                    task_id=task.id,
                    timestamp=entry["timestamp"],
                    predicted_value=entry["balance_kw"],
                    unit="kW",
                ))
        else:
            raise ValueError(f"Unknown prediction type: {prediction_type}")

        task.status = "completed"
        task.completed_at = datetime.now(timezone.utc)

    except Exception as e:
        # Record the failure on the task instead of propagating so the
        # API can surface it; rollback/commit is the caller's concern.
        task.status = "failed"
        task.error_message = str(e)
        logger.error(f"Prediction task {task.id} failed: {e}", exc_info=True)

    return task
|
||||
Reference in New Issue
Block a user