diff --git a/.gitignore b/.gitignore
index 8bee251..68285be 100644
--- a/.gitignore
+++ b/.gitignore
@@ -90,4 +90,5 @@ Strategies/*
**/notes.md
# But keep this specific file
-!Strategies/Samplestrategy.py
\ No newline at end of file
+!Strategies/Samplestrategy.py
+!Strategies/Samplestrategyreport.py
\ No newline at end of file
diff --git a/Strategies/Samplestrategyreport.py b/Strategies/Samplestrategyreport.py
new file mode 100644
index 0000000..a07c2ab
--- /dev/null
+++ b/Strategies/Samplestrategyreport.py
@@ -0,0 +1,338 @@
+import pandas as pd
+import talib.abstract as ta
+
+from FeatureEngineering.Indicators import indicators as qtpylib
+from FeatureEngineering.MarketStructure.engine import MarketStructureEngine
+from core.reporting.core.context import ContextSpec
+from core.reporting.core.metrics import ExpectancyMetric, MaxDrawdownMetric
+from core.strategy.base import BaseStrategy
+from core.strategy.informatives import informative
+
+
+class Samplestrategyreport(BaseStrategy):
+
+ def __init__(
+ self,
+ df,
+ symbol,
+ startup_candle_count,
+ ):
+ super().__init__(
+ df=df,
+ symbol=symbol,
+ startup_candle_count=startup_candle_count,
+ )
+
+ strategy_config = {
+ "USE_TP1": True,
+ "USE_TP2": False,
+
+ "USE_TRAILING": True,
+ "TRAIL_FROM": "tp1", # "entry" | "tp1"
+
+ "TRAIL_MODE": "ribbon",
+ "SWING_LOOKBACK": 5,
+
+ "ALLOW_TP2_WITH_TRAILING": False,
+ }
+
+ @informative('M30')
+ def populate_indicators_M30(self, df: pd.DataFrame):
+
+ df['rma_33_low'] = qtpylib.rma(df, df['low'], 33)
+ df['rma_33_high'] = qtpylib.rma(df, df['high'], 33)
+
+ df['rma_144_low'] = qtpylib.rma(df, df['low'], 144)
+ df['rma_144_high'] = qtpylib.rma(df, df['high'], 144)
+
+ df["atr"] = ta.ATR(df, 14)
+
+ # --- market structure HTF
+ df = MarketStructureEngine.apply(
+ df,
+ features=[
+ "pivots",
+ "price_action",
+ "follow_through",
+ "structural_vol",
+ "trend_regime",
+ ],
+ )
+
+
+ return df
+
+ def populate_indicators(self) -> None:
+ df = self.df
+
+
+ df['atr'] = ta.ATR(df, 14)
+
+
+
+ df['rma_33_low'] = qtpylib.rma(df, df['low'], 33)
+ df['rma_33_high'] = qtpylib.rma(df, df['high'], 33)
+
+ df['rma_144_low'] = qtpylib.rma(df, df['low'], 144)
+        df['rma_144_high'] = qtpylib.rma(df, df['high'], 144)
+
+        df['sl_long'] = df['rma_33_low']  # df['close'] - (1 * df['atr'])
+ df['sl_short'] = df['rma_33_high'] #df['close'] + (1* df['atr'])
+
+ df['low_5'] = df['low'].rolling(5).min()
+ df['high_5'] = df['high'].rolling(5).max()
+ df['low_15'] = df['low'].rolling(15).min()
+ df['high_15'] = df['high'].rolling(15).max()
+
+ df['fast_rma_upper_than_slow'] = None
+ df['slow_rma_uprising'] = None
+ df['fast_rma_uprising'] = None
+
+ df['fast_rma_upper_than_slow'] = df['rma_33_low'] > df['rma_144_high']
+ df['fast_rma_lower_than_slow'] = df['rma_33_high'] < df['rma_144_low']
+
+ df['slow_rma_uprising'] = df['rma_144_low'] > df['rma_144_low'].shift(1)
+ df['fast_rma_uprising'] = df['rma_33_low'] > df['rma_33_low'].shift(1)
+
+ df = MarketStructureEngine.apply(
+ df,
+ features=[
+ "pivots",
+ "price_action",
+ "follow_through",
+ "structural_vol",
+ "trend_regime",
+ ],
+ )
+
+ self.df = df
+
+    def populate_entry_trend(self) -> None:
+        """
+        Build entry signals from RMA-ribbon pullback setups:
+        - long: bullish rejection of the 33/144 ribbon in an HTF uptrend
+        - short: bearish rejection of the ribbon in an HTF downtrend
+        NOTE(review): previous docstring described session/OB/FVG logic
+        that is not present in this implementation.
+        """
+        df = self.df
+
+
+
+        # --- 🔹 5. Logical entry masks ---
+ long_mask_rma_33 = (
+ (df['close'] > df['open']) &
+ (df['fast_rma_upper_than_slow']) & # HTF trend
+ (df['low'] <= df['rma_33_high']) & # pullback into ribbon
+ (df['close'] > df['rma_33_high']) & # rejection
+ (df['slow_rma_uprising'] ) & # trend still rising
+ (df['close'] > df['rma_144_low_M30'])
+ )
+
+ long_mask_rma_144 = (
+ (df['close'] > df['open']) &
+ (df['fast_rma_upper_than_slow']) & # HTF trend
+ (df['low'] <= df['rma_144_high']) & # pullback into ribbon
+ (df['close'] > df['rma_144_high']) & # rejection
+ (df['rma_144_low'] > df['rma_144_low'].shift(1)) # trend still rising
+ & (df['close'] > df['rma_144_low_M30'])
+ )
+
+ short_mask = (
+ (df['close'] < df['open']) &
+ (df['rma_33_high'] < df['rma_144_low']) & # trend down
+ (df['high'] >= df['rma_33_low']) & # pullback
+ (df['close'] < df['rma_33_low']) & # rejection
+ (df['rma_33_high'] < df['rma_33_high'].shift(1)) # falling impulse
+ & (df['close'] < df['rma_144_high_M30'])
+ )
+
+ short_mask_2 = (
+ (df['close'] < df['open']) &
+ (df['fast_rma_lower_than_slow']) & # trend down
+ (df['high'] >= df['rma_144_low']) & # pullback
+ (df['close'] < df['rma_144_low']) & # rejection
+ (df['rma_144_high'] < df['rma_144_high'].shift(1)) # falling impulse
+ & (df['close'] < df['rma_144_high_M30'])
+ )
+
+ df["signal_entry"] = None
+
+ idx = df.index[long_mask_rma_33]
+ df.loc[idx, "signal_entry"] = [{"direction": "long", "tag": "LONG SETUP 1"}] * len(idx)
+
+ idx = df.index[long_mask_rma_144]
+ df.loc[idx, "signal_entry"] = [{"direction": "long", "tag": "LONG SETUP 2"}] * len(idx)
+
+ idx = df.index[short_mask]
+ df.loc[idx, "signal_entry"] = [{"direction": "short", "tag": "SHORT SETUP 1"}] * len(idx)
+
+ idx = df.index[short_mask_2]
+ df.loc[idx, "signal_entry"] = [{"direction": "short", "tag": "SHORT SETUP 2"}] * len(idx)
+
+
+        # --- 🔹 7. SL/TP levels ---
+ has_signals = df["signal_entry"].apply(bool)
+ df.loc[has_signals, "levels"] = df.loc[has_signals].apply(
+ lambda row: self.calculate_levels(row["signal_entry"], row),
+ axis=1
+ )
+
+
+
+ self.df = df
+
+
+
+ def populate_exit_trend(self):
+ self.df["signal_exit"] = None
+ self.df["custom_stop_loss"] = None
+
+ def compute_sl(
+ self,
+ *,
+ row,
+ direction,
+ min_atr_mult=0.5,
+ min_pct=0.001,
+ ):
+ """
+        Returns:
+        - sl_level: stop-loss price
+        - sl_source: 'struct' | 'min'
+ """
+
+ close = row["close"]
+ atr = row["atr"]
+
+ # =========================
+        # STRUCTURAL SL
+ # =========================
+
+ if direction == "long":
+ sl_structural = min(row["low_15"], row["low_5"]) - atr * 0.5
+ else:
+ sl_structural = max(row["high_15"], row["high_5"]) + atr * 0.5
+
+ # =========================
+ # MINIMALNY SL
+ # =========================
+
+ min_sl_atr = atr * min_atr_mult
+ min_sl_pct = close * min_pct
+ min_distance = max(min_sl_atr, min_sl_pct)
+
+ if direction == "long":
+ sl_min = close - min_distance
+
+ if sl_structural < sl_min:
+ return sl_structural, "struct"
+ else:
+ return sl_min, "min"
+
+ else:
+ sl_min = close + min_distance
+
+ if sl_structural > sl_min:
+ return sl_structural, "struct"
+ else:
+ return sl_min, "min"
+
+ def calculate_levels(self, signals, row):
+
+ if not isinstance(signals, dict):
+ return None
+
+ direction = signals["direction"]
+ close = row["close"]
+
+ sl, sl_source = self.compute_sl(
+ row=row,
+ direction=direction,
+ min_atr_mult=1,
+ min_pct=0.001
+ )
+
+ risk = abs(close - sl)
+
+ # ============================
+ # MICROSTRUCTURE-AWARE TP
+ # ============================
+
+ tp1_mult = 1
+ tp2_mult = 2
+
+ if direction == "long":
+ tp1_level = close + risk * tp1_mult
+ tp2_level = close + risk * tp2_mult
+ else:
+ tp1_level = close - risk * tp1_mult
+ tp2_level = close - risk * tp2_mult
+
+ return {
+ "SL": {
+ "level": sl,
+ "tag": f"{sl_source}"
+ },
+ "TP1": {
+ "level": tp1_level,
+ "tag": f"RR 1:{tp1_mult} from {sl_source}"
+ },
+ "TP2": {
+ "level": tp2_level,
+ "tag": f"RR 1:{tp2_mult} from {sl_source}"
+ },
+ }
+
+ def build_report_spec(self):
+
+ return (
+ super()
+ .build_report_spec()
+ .add_metric(ExpectancyMetric())
+ .add_metric(MaxDrawdownMetric())
+ .add_context(
+ ContextSpec(
+ name="trend regime HTF",
+ column="trend_regime_M30",
+ source="entry_candle"
+ )
+ )
+ .add_context(
+ ContextSpec(
+ name="trend regime LTF",
+ column="trend_regime",
+ source="entry_candle"
+ )
+ )
+
+ .add_context(
+ ContextSpec(
+ name="trend strength HTF",
+ column="trend_strength_M30",
+ source="entry_candle"
+ )
+ )
+ .add_context(
+ ContextSpec(
+ name="trend strength LTF",
+ column="trend_strength",
+ source="entry_candle"
+ )
+ )
+
+ .add_context(
+ ContextSpec(
+ name="trend bias HTF",
+ column="trend_bias_M30",
+ source="entry_candle"
+ )
+ )
+ .add_context(
+ ContextSpec(
+ name="trend bias LTF",
+ column="trend_bias",
+ source="entry_candle"
+ )
+ )
+ )
\ No newline at end of file
diff --git a/backtest_run.py b/backtest_run.py
index f973fce..d6b1937 100644
--- a/backtest_run.py
+++ b/backtest_run.py
@@ -4,7 +4,6 @@
if __name__ == "__main__":
import cProfile
- import pstats
from pathlib import Path
from datetime import datetime
diff --git a/config/backtest.py b/config/backtest.py
index 24610a5..a4faa09 100644
--- a/config/backtest.py
+++ b/config/backtest.py
@@ -2,7 +2,7 @@
from enum import Enum
from pathlib import Path
-from config.logger_config import LoggerConfig
+from core.logging.config import LoggerConfig
logging.basicConfig(level=logging.INFO)
@@ -121,7 +121,6 @@ class StdoutMode(str, Enum):
PLOT_ONLY = False # Skip backtest, just plots
SAVE_TRADES_CSV = False # Legacy / debug only
-from config.logger_config import LoggerConfig
LOGGER_CONFIG = LoggerConfig(
stdout=True,
diff --git a/config/live.py b/config/live.py
index 85a2297..61fb6cf 100644
--- a/config/live.py
+++ b/config/live.py
@@ -16,7 +16,7 @@
"SERVER": "SERVER",
}
-DRY_RUN = False
+DRY_RUN = True
TICK_INTERVAL_SEC = 1.0
@@ -24,9 +24,9 @@
# STRATEGY
# ==================================================
-STRATEGY_CLASS = "Samplestrategy"
+STRATEGY_CLASS = "Samplestrategyreport"
-SYMBOLS = "EURUSD"
+SYMBOLS = "BTCUSD"
TIMEFRAME = "M1"
diff --git a/config/logger_config.py b/config/logger_config.py
deleted file mode 100644
index 21f142e..0000000
--- a/config/logger_config.py
+++ /dev/null
@@ -1,137 +0,0 @@
-from dataclasses import dataclass
-from pathlib import Path
-import logging
-from contextlib import contextmanager
-from time import perf_counter
-import cProfile
-import pstats
-
-
-LOG_PREFIX = {
- "DATA": "📈 DATA PREPARER |",
- "BT": "🧪 BACKTEST |",
- "STRAT": "📐 STRATEGY |",
- "REPORT": "📊 REPORT |",
- "RUNNER": "🚀 RUN |",
-}
-
-@dataclass
-class LoggerConfig:
- stdout: bool = True
- file: bool = False
- timing: bool = True
- profiling: bool = False
- log_dir: Path | None = None
-
-
-class RunLogger:
- def __init__(self, name: str, cfg: LoggerConfig, prefix: str = ""):
- self.cfg = cfg
- self.name = name
- self.prefix = prefix
-
- self._t0 = perf_counter()
- self._t_last = self._t0
-
- self.logger = logging.getLogger(name)
- self.logger.setLevel(logging.INFO)
- self.logger.handlers.clear()
- self.logger.propagate = False
- self._timings: dict[str, float] = {}
-
- if cfg.stdout:
- h = logging.StreamHandler()
- h.setFormatter(logging.Formatter("%(message)s"))
- self.logger.addHandler(h)
-
- if cfg.file:
- if cfg.log_dir is None:
- raise ValueError("LoggerConfig.file=True requires log_dir")
-
- cfg.log_dir.mkdir(parents=True, exist_ok=True)
- path = cfg.log_dir / f"{name}.log"
-
- fh = logging.FileHandler(path, encoding="utf-8")
- fh.setFormatter(
- logging.Formatter("%(asctime)s | %(message)s")
- )
- self.logger.addHandler(fh)
-
- # --------------------
- # BASIC LOG
- # --------------------
- def log(self, msg: str):
- if self.prefix:
- self.logger.info(f"{self.prefix} {msg}")
- else:
- self.logger.info(msg)
-
- # --------------------
- # TIMED STEP
- # --------------------
- def step(self, label: str):
- if not self.cfg.timing:
- return
-
- now = perf_counter()
- delta = now - self._t_last
- total = now - self._t0
-
- msg = f"⏱️ {label:<30} +{delta:6.2f}s | total {total:6.2f}s"
- self.log(msg)
-
- self._t_last = now
-
- def get_timings(self) -> dict[str, float]:
- """
- Return collected timing sections.
- """
- return dict(self._timings)
-
- # --------------------
- # CONTEXT TIMER
- # --------------------
- @contextmanager
- def time(self, label: str):
- if not self.cfg.timing:
- yield
- return
-
- t0 = perf_counter()
- yield
- dt = perf_counter() - t0
- self.logger.info(f"⏱️ {label:<30} {dt:6.3f}s")
-
- @contextmanager
- def section(self, name: str):
- t0 = perf_counter()
- yield
- dt = perf_counter() - t0
- self._timings[name] = self._timings.get(name, 0.0) + dt
-
-
-@contextmanager
-def profiling(enabled: bool, path):
- if not enabled:
- yield
- return
-
- pr = cProfile.Profile()
- pr.enable()
- yield
- pr.disable()
- stats = pstats.Stats(pr)
- stats.sort_stats("cumtime")
- stats.dump_stats(path)
-
-
-class NullLogger:
- @contextmanager
- def section(self, name: str):
- yield
-
- def log(self, msg: str):
- pass
-
- def get_timings(self) -> dict[str, float]:
- return {}
\ No newline at end of file
diff --git a/core/backtesting/engine/worker.py b/core/backtesting/engine/worker.py
index c692343..1cf7961 100644
--- a/core/backtesting/engine/worker.py
+++ b/core/backtesting/engine/worker.py
@@ -1,9 +1,10 @@
import pandas as pd
-from config.logger_config import RunLogger, LoggerConfig
from core.backtesting.engine.backtester import Backtester
from core.backtesting.strategy_runner import strategy_orchestration
+from core.logging.config import LoggerConfig
+from core.logging.run_logger import RunLogger
def run_backtest_worker(
diff --git a/core/backtesting/runner.py b/core/backtesting/runner.py
index 782d368..2928039 100644
--- a/core/backtesting/runner.py
+++ b/core/backtesting/runner.py
@@ -2,11 +2,11 @@
from concurrent.futures import ProcessPoolExecutor, as_completed
from datetime import datetime
from pathlib import Path
+from time import perf_counter
from uuid import uuid4
import pandas as pd
-from config.logger_config import RunLogger, profiling
from config.report_config import ReportConfig, StdoutMode
from core.backtesting.backend_factory import create_backtest_backend
from core.backtesting.engine.backtester import Backtester
@@ -15,8 +15,10 @@
from core.backtesting.results_logic.result import BacktestResult
from core.backtesting.results_logic.store import ResultStore
from core.backtesting.strategy_runner import strategy_orchestration
-from core.data_provider import DefaultOhlcvDataProvider, CsvMarketDataCache
+from core.data_provider import BacktestStrategyDataProvider, CsvMarketDataCache
from core.live_trading.strategy_loader import load_strategy_class
+from core.logging.profiling import profiling
+from core.logging.run_logger import RunLogger
from core.reporting.runner import ReportRunner
from core.reporting.summary_runner import SummaryReportRunner
@@ -87,31 +89,26 @@ def load_data(self) -> dict[str, dict[str, pd.DataFrame]]:
start = pd.Timestamp(self.cfg.TIMERANGE["start"], tz="UTC")
end = pd.Timestamp(self.cfg.TIMERANGE["end"], tz="UTC")
- self.provider = DefaultOhlcvDataProvider(
+ self.provider = BacktestStrategyDataProvider(
backend=backend,
cache=CsvMarketDataCache(self.cfg.MARKET_DATA_PATH),
backtest_start=start,
backtest_end=end,
+ required_timeframes=all_tfs,
+ startup_candle_count=self.cfg.STARTUP_CANDLE_COUNT,
logger=self.log_data,
)
- all_data = {}
+ all_data: dict[str, dict[str, pd.DataFrame]] = {}
with self.log_data.time("load_all"):
for symbol in self.cfg.SYMBOLS:
- per_symbol = {}
- for tf in all_tfs:
- per_symbol[tf] = self.provider.get_ohlcv(
- symbol=symbol,
- timeframe=tf,
- start=start,
- end=end,
- )
- all_data[symbol] = per_symbol
+ all_data[symbol] = self.provider.fetch(symbol)
self.log_data.log(
f"summary | symbols={len(all_data)} timeframes={len(all_tfs)}"
)
+
return all_data
# ==================================================
# 2️⃣ STRATEGY EXECUTION
@@ -320,6 +317,9 @@ def _build_result(self) -> BacktestResult:
# ==================================================
def run(self):
+
+ t0 = perf_counter()
+
self.run_path = Path(
f"results/run_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
)
@@ -366,4 +366,7 @@ def run(self):
run_path=run_path,
).run()
+ total = perf_counter() - t0
+ self.log_run.log(f"TOTAL {total:,.3f}s")
+
self.log_run.log("finished")
diff --git a/core/backtesting/strategy_runner.py b/core/backtesting/strategy_runner.py
index d2235ec..475a790 100644
--- a/core/backtesting/strategy_runner.py
+++ b/core/backtesting/strategy_runner.py
@@ -3,7 +3,8 @@
import pandas as pd
-from config.logger_config import RunLogger, NullLogger
+from core.logging.null_logger import NullLogger
+from core.logging.run_logger import RunLogger
from core.strategy.orchestration.informatives import apply_informatives
from core.strategy.plan_builder import PlanBuildContext
from core.utils.timeframe import tf_to_minutes
diff --git a/core/data_provider/__init__.py b/core/data_provider/__init__.py
index ffb92ca..41ed177 100644
--- a/core/data_provider/__init__.py
+++ b/core/data_provider/__init__.py
@@ -1,13 +1,13 @@
from core.data_provider.cache.csv_cache import CsvMarketDataCache
from core.data_provider.contracts import MarketDataBackend
-from core.data_provider.providers.default_provider import DefaultOhlcvDataProvider
+from core.data_provider.providers.default_provider import BacktestStrategyDataProvider
from core.data_provider.errors import DataNotAvailable
__all__ = [
"MarketDataBackend",
"CsvMarketDataCache",
- "DefaultOhlcvDataProvider",
+ "BacktestStrategyDataProvider",
]
diff --git a/core/data_provider/clients/mt5_client.py b/core/data_provider/clients/mt5_client.py
index ee8435b..f04290d 100644
--- a/core/data_provider/clients/mt5_client.py
+++ b/core/data_provider/clients/mt5_client.py
@@ -1,19 +1,14 @@
import pandas as pd
import MetaTrader5 as mt5
-from core.data_provider.contracts import MarketDataProvider
+from core.data_provider.contracts import LiveMarketDataClient
from core.utils.lookback import LOOKBACK_CONFIG
from core.utils.timeframe import MT5_TIMEFRAME_MAP
-class MT5Client(MarketDataProvider):
+class MT5Client(LiveMarketDataClient):
- def __init__(
- self,
- *,
- bars_per_tf: dict[str, int]):
- self.bars_per_tf = bars_per_tf
@staticmethod
def _fetch_ohlcv(
diff --git a/core/data_provider/contracts.py b/core/data_provider/contracts.py
index e93642c..5765225 100644
--- a/core/data_provider/contracts.py
+++ b/core/data_provider/contracts.py
@@ -15,8 +15,25 @@ def fetch_ohlcv(
) -> pd.DataFrame:
...
+class CsvMarketDataCache(Protocol):
+ def coverage(self, *, symbol: str, timeframe: str): ...
+ def load_range(self, *, symbol: str, timeframe: str, start: pd.Timestamp, end: pd.Timestamp) -> pd.DataFrame: ...
+ def save(self, *, symbol: str, timeframe: str, df: pd.DataFrame) -> None: ...
+ def append(self, *, symbol: str, timeframe: str, df: pd.DataFrame) -> None: ...
+
+
+class StrategyDataProvider(Protocol):
+ """
+ Strategy-level data contract.
+ """
+ def fetch(self, symbol: str) -> dict[str, pd.DataFrame]:
+ ...
+
-class MarketDataProvider(ABC):
+class LiveMarketDataClient(ABC):
+ """
+ Low-level live market data client.
+ """
@abstractmethod
def get_ohlcv(
@@ -26,15 +43,4 @@ def get_ohlcv(
timeframe: str,
bars: int,
) -> pd.DataFrame:
- pass
-
-
-class CsvMarketDataCache(Protocol):
- def coverage(self, *, symbol: str, timeframe: str): ...
- def load_range(self, *, symbol: str, timeframe: str, start: pd.Timestamp, end: pd.Timestamp) -> pd.DataFrame: ...
- def save(self, *, symbol: str, timeframe: str, df: pd.DataFrame) -> None: ...
- def append(self, *, symbol: str, timeframe: str, df: pd.DataFrame) -> None: ...
-
-
-class OhlcvProvider(Protocol):
- def get_ohlcv(self, *, symbol: str, timeframe: str, start: pd.Timestamp, end: pd.Timestamp,) -> pd.DataFrame: ...
\ No newline at end of file
+ ...
\ No newline at end of file
diff --git a/core/data_provider/providers/backtest_provider.py b/core/data_provider/providers/backtest_provider.py
new file mode 100644
index 0000000..e69de29
diff --git a/core/data_provider/providers/default_provider.py b/core/data_provider/providers/default_provider.py
index 135bbe7..f17d1cc 100644
--- a/core/data_provider/providers/default_provider.py
+++ b/core/data_provider/providers/default_provider.py
@@ -2,12 +2,11 @@
import pandas as pd
-from config.logger_config import RunLogger
from core.data_provider.ohlcv_schema import sort_and_deduplicate, ensure_utc_time
from core.utils.timeframe import timeframe_to_pandas_freq
-class DefaultOhlcvDataProvider:
+class BacktestStrategyDataProvider:
"""
BACKTEST OHLCV provider.
@@ -18,18 +17,22 @@ class DefaultOhlcvDataProvider:
"""
def __init__(
- self,
- *,
- backend,
- cache,
- backtest_start: pd.Timestamp,
- backtest_end: pd.Timestamp,
- logger
+ self,
+ *,
+ backend,
+ cache,
+ backtest_start: pd.Timestamp,
+ backtest_end: pd.Timestamp,
+ required_timeframes: list[str],
+ startup_candle_count: int,
+ logger,
):
self.backend = backend
self.cache = cache
self.backtest_start = self._to_utc(backtest_start)
self.backtest_end = self._to_utc(backtest_end)
+ self.required_timeframes = required_timeframes
+ self.startup_candle_count = startup_candle_count
self.logger = logger
# -------------------------------------------------
@@ -72,7 +75,7 @@ def shift_time_by_candles(
# Main API
# -------------------------------------------------
- def get_ohlcv(
+ def _get_ohlcv(
self,
*,
symbol: str,
@@ -185,7 +188,7 @@ def get_ohlcv(
# Informative data
# -------------------------------------------------
- def get_informative_df(
+ def _get_informative_df(
self,
*,
symbol: str,
@@ -207,7 +210,7 @@ def get_informative_df(
candles=startup_candle_count,
)
- df = self.get_ohlcv(
+ df = self._get_ohlcv(
symbol=symbol,
timeframe=timeframe,
start=extended_start,
@@ -215,3 +218,26 @@ def get_informative_df(
)
return df.copy()
+
+ def fetch(self, symbol: str) -> dict[str, pd.DataFrame]:
+ """
+ Strategy-level data fetch for BACKTEST.
+
+ Returns:
+ data_by_tf: dict[timeframe, DataFrame]
+ """
+
+ data: dict[str, pd.DataFrame] = {}
+
+        # The strategy declares its required timeframes
+        # (exactly as the backtest does today);
+        # the runner already knows them.
+ for tf in self.required_timeframes:
+ df = self._get_informative_df(
+ symbol=symbol,
+ timeframe=tf,
+ startup_candle_count=self.startup_candle_count,
+ )
+ data[tf] = df
+
+ return data
diff --git a/core/data_provider/providers/live_provider.py b/core/data_provider/providers/live_provider.py
new file mode 100644
index 0000000..bf83b27
--- /dev/null
+++ b/core/data_provider/providers/live_provider.py
@@ -0,0 +1,33 @@
+import pandas as pd
+
+from core.data_provider.contracts import LiveMarketDataClient
+
+
+class LiveStrategyDataProvider:
+ """
+ Strategy-level provider for LIVE trading.
+
+ Adapts any LiveMarketDataClient to StrategyDataProvider.
+ """
+
+ def __init__(
+ self,
+ *,
+ client: LiveMarketDataClient,
+ bars_per_tf: dict[str, int],
+ ):
+ self.client = client
+ self.bars_per_tf = bars_per_tf
+
+ def fetch(self, symbol: str) -> dict[str, pd.DataFrame]:
+ data: dict[str, pd.DataFrame] = {}
+
+ for tf, bars in self.bars_per_tf.items():
+ df = self.client.get_ohlcv(
+ symbol=symbol,
+ timeframe=tf,
+ bars=bars,
+ )
+ data[tf] = df
+
+ return data
\ No newline at end of file
diff --git a/core/data_provider/tests/test_default_provider.py b/core/data_provider/tests/test_default_provider.py
index 3cd9d05..7e48b23 100644
--- a/core/data_provider/tests/test_default_provider.py
+++ b/core/data_provider/tests/test_default_provider.py
@@ -1,9 +1,10 @@
import pandas as pd
-from config.logger_config import NullLogger
from core.data_provider import CsvMarketDataCache
-from core.data_provider.providers.default_provider import DefaultOhlcvDataProvider
+from core.data_provider.providers.default_provider import BacktestStrategyDataProvider
+from core.logging.null_logger import NullLogger
+
def test_no_cache_fetches_and_saves(tmp_path, utc):
@@ -19,21 +20,24 @@ def test_no_cache_fetches_and_saves(tmp_path, utc):
("EURUSD", "M1", start, end): df_full,
})
- p = DefaultOhlcvDataProvider(
+ p = BacktestStrategyDataProvider(
backend=backend,
cache=cache,
backtest_start=start,
backtest_end=end,
+ required_timeframes=["M1"],
+ startup_candle_count=0,
logger=NullLogger(),
)
- out = p.get_ohlcv(symbol="EURUSD", timeframe="M1", start=start, end=end)
+ out = p.fetch(symbol="EURUSD")
+ df = out["M1"]
# assert
assert len(backend.calls) == 1
- assert out["time"].is_monotonic_increasing
- assert out["time"].dt.tz is not None
- assert out["close"].iloc[-1] == 999
+ assert df["time"].is_monotonic_increasing
+ assert df["time"].dt.tz is not None
+ assert df["close"].iloc[-1] == 999
def test_missing_before_fetches_pre_and_appends(tmp_path, utc):
@@ -49,24 +53,29 @@ def test_missing_before_fetches_pre_and_appends(tmp_path, utc):
end = utc("2022-01-01 00:10:00")
from core.data_provider.tests.conftest import FakeBackend, make_ohlcv
- df_pre = make_ohlcv("2022-01-01 00:00:00", periods=5, freq="1min") # 00:00..00:04
+ df_pre = make_ohlcv("2022-01-01 00:00:00", periods=5, freq="1min")
backend = FakeBackend({
("EURUSD", "M1", utc("2022-01-01 00:00:00"), utc("2022-01-01 00:05:00")): df_pre
})
- from core.data_provider.providers.default_provider import DefaultOhlcvDataProvider
- p = DefaultOhlcvDataProvider(
- backend=backend, cache=cache,
- backtest_start=start, backtest_end=end,
+ from core.data_provider.providers.default_provider import BacktestStrategyDataProvider
+ p = BacktestStrategyDataProvider(
+ backend=backend,
+ cache=cache,
+ backtest_start=start,
+ backtest_end=end,
+ required_timeframes=["M1"],
+ startup_candle_count=0,
logger=NullLogger(),
)
- out = p.get_ohlcv(symbol="EURUSD", timeframe="M1", start=start, end=end)
+ out = p.fetch(symbol="EURUSD")
+ df = out["M1"]
assert backend.calls == [("EURUSD","M1", utc("2022-01-01 00:00:00"), utc("2022-01-01 00:05:00"))]
- assert out["time"].min() == utc("2022-01-01 00:00:00")
- assert out["time"].max() == utc("2022-01-01 00:10:00")
+ assert df["time"].min() == utc("2022-01-01 00:00:00")
+ assert df["time"].max() == utc("2022-01-01 00:10:00")
def test_missing_after_fetches_post_and_appends(tmp_path, utc):
@@ -89,14 +98,19 @@ def test_missing_after_fetches_post_and_appends(tmp_path, utc):
("EURUSD", "M1", utc("2022-01-01 00:05:00"), utc("2022-01-01 00:10:00")): df_post
})
- from core.data_provider.providers.default_provider import DefaultOhlcvDataProvider
- p = DefaultOhlcvDataProvider(
- backend=backend, cache=cache,
- backtest_start=start, backtest_end=end,
+ from core.data_provider.providers.default_provider import BacktestStrategyDataProvider
+ p = BacktestStrategyDataProvider(
+ backend=backend,
+ cache=cache,
+ backtest_start=start,
+ backtest_end=end,
+ required_timeframes=["M1"],
+ startup_candle_count=0,
logger=NullLogger(),
)
- out = p.get_ohlcv(symbol="EURUSD", timeframe="M1", start=start, end=end)
+ out = p.fetch(symbol="EURUSD")
+ df = out["M1"]
assert backend.calls == [("EURUSD","M1", utc("2022-01-01 00:05:00"), utc("2022-01-01 00:10:00"))]
- assert out["time"].max() == utc("2022-01-01 00:10:00")
\ No newline at end of file
+ assert df["time"].max() == utc("2022-01-01 00:10:00")
\ No newline at end of file
diff --git a/core/live_trading/engine.py b/core/live_trading/engine.py
index 61f8753..bc4f491 100644
--- a/core/live_trading/engine.py
+++ b/core/live_trading/engine.py
@@ -3,47 +3,33 @@
from datetime import datetime
import time
-from core.live_trading.strategy_adapter import LiveStrategyAdapter
+from core.live_trading.strategy_runner import LiveStrategyRunner
class LiveEngine:
"""
Live trading engine.
- Tick loop:
- - always run exit logic
- - on new candle: update strategy and maybe execute entry
"""
def __init__(
self,
*,
position_manager,
- market_data_provider,
- strategy_adapter: LiveStrategyAdapter,
+ market_state_provider,
+ strategy_runner,
tick_interval_sec: float = 1.0,
):
self.position_manager = position_manager
- self.market_data_provider = market_data_provider
- self.strategy_adapter = strategy_adapter
+ self.market_state_provider = market_state_provider
+ self.strategy_runner = strategy_runner
self.tick_interval_sec = tick_interval_sec
self._running = False
- self._last_candle_time = None
-
- # keep last candle-derived state for tick-based management
- self._last_strategy_row = None
+ self._last_strategy_state = None
def start(self):
self._running = True
print("🟢 LiveEngine started")
- self._run_loop()
-
- def stop(self):
- self._running = False
- print("🔴 LiveEngine stopped")
-
- def _run_loop(self):
- last_heartbeat = time.time()
while self._running:
try:
@@ -51,51 +37,30 @@ def _run_loop(self):
except Exception as e:
print(f"❌ Engine error: {type(e).__name__}: {e}")
- if time.time() - last_heartbeat > 30:
- print("💓 Engine alive")
- last_heartbeat = time.time()
-
time.sleep(self.tick_interval_sec)
+ def stop(self):
+ self._running = False
+ print("🔴 LiveEngine stopped")
+
def _tick(self):
- market_state = self.market_data_provider()
+ market_state = self.market_state_provider.poll()
if market_state is None:
return
- market_state.setdefault("time", datetime.utcnow())
-
- # -----------------------------------------
- # Inject last strategy management signals
- # -----------------------------------------
- if self._last_strategy_row is not None:
- # these keys are optional in your DF
- se = self._last_strategy_row.get("signal_exit")
- csl = self._last_strategy_row.get("custom_stop_loss")
-
- if isinstance(se, dict):
- market_state["signal_exit"] = se
- if isinstance(csl, dict):
- market_state["custom_stop_loss"] = csl
-
- # -----------------------------------------
+ # ---------------------------
# EXIT LOGIC (tick-based)
- # -----------------------------------------
+ # ---------------------------
self.position_manager.on_tick(market_state=market_state)
- # -----------------------------------------
+ # ---------------------------
# ENTRY LOGIC (candle-based)
- # -----------------------------------------
- candle_time = market_state.get("candle_time")
- if candle_time is None:
- return
-
- if self._last_candle_time == candle_time:
+ # ---------------------------
+ if market_state.get("candle_time") is None:
return
- self._last_candle_time = candle_time
-
- result = self.strategy_adapter.on_new_candle()
- self._last_strategy_row = result.last_row
+ result = self.strategy_runner.run()
+ self._last_strategy_state = result.last_row
if result.plan is not None:
self.position_manager.on_trade_plan(
diff --git a/core/live_trading/execution/mt5_adapter.py b/core/live_trading/execution/mt5_adapter.py
index 0c41b13..8876d3d 100644
--- a/core/live_trading/execution/mt5_adapter.py
+++ b/core/live_trading/execution/mt5_adapter.py
@@ -10,24 +10,22 @@ class MT5Adapter:
"""
def __init__(
- self,
- *,
- login: int | None = None,
- password: str | None = None,
- server: str | None = None,
- dry_run: bool = False,
+ self,
+ *,
+ dry_run: bool = False,
+ log,
):
self.dry_run = dry_run
+ self.log = log
if self.dry_run:
- print("⚠ MT5Adapter running in DRY-RUN mode")
+ self.log.warning("MT5Adapter running in DRY-RUN mode")
return
if not mt5.terminal_info():
raise RuntimeError("MT5 terminal not initialized")
- print("🟢 MT5 initialized")
-
+ self.log.info("MT5 initialized")
# ==================================================
# Execution API
# ==================================================
@@ -38,25 +36,34 @@ def open_position(
symbol: str,
direction: str,
volume: float,
- price: float,
sl: float,
tp: float | None = None,
) -> Dict[str, Any]:
if self.dry_run:
- print(
+ self.log.debug(
f"[DRY-RUN] OPEN {symbol} {direction} "
- f"vol={volume} price={price} sl={sl} tp={tp}"
+ f"vol={volume} sl={sl} tp={tp}"
)
- return {"ticket": f"MOCK_{symbol}", "price": price}
+ return {"ticket": f"MOCK_{symbol}", "price": None}
# --------------------------------------------------
- # SYMBOL + TICK
+ # SYMBOL INFO / MODE
# --------------------------------------------------
symbol_info = mt5.symbol_info(symbol)
if symbol_info is None:
raise RuntimeError(f"Symbol not found: {symbol}")
+ if symbol_info.trade_mode == mt5.SYMBOL_TRADE_MODE_CLOSEONLY:
+ raise RuntimeError(f"Symbol {symbol} is CLOSE-ONLY")
+
+ # --------------------------------------------------
+ # NETTING GUARD
+ # --------------------------------------------------
+ positions = mt5.positions_get(symbol=symbol)
+ if positions:
+ raise RuntimeError(f"Position already open for {symbol}")
+
if not symbol_info.visible:
mt5.symbol_select(symbol, True)
@@ -67,33 +74,32 @@ def open_position(
market_price = tick.ask if direction == "long" else tick.bid
# --------------------------------------------------
- # VALIDATE SL / TP (ABSOLUTELY REQUIRED)
+ # SL / TP VALIDATION
# --------------------------------------------------
stops_level = symbol_info.trade_stops_level * symbol_info.point
if direction == "long":
if sl >= market_price:
- raise RuntimeError(f"Invalid SL for long: sl={sl}, price={market_price}")
+ raise RuntimeError("Invalid SL for long")
if tp is not None and tp <= market_price:
- raise RuntimeError(f"Invalid TP for long: tp={tp}, price={market_price}")
+ raise RuntimeError("Invalid TP for long")
if (market_price - sl) < stops_level:
- raise RuntimeError(f"SL too close: {market_price - sl} < {stops_level}")
- if tp is not None and (tp - market_price) < stops_level:
- raise RuntimeError(f"TP too close: {tp - market_price} < {stops_level}")
+ raise RuntimeError("SL too close")
+ if tp and (tp - market_price) < stops_level:
+ raise RuntimeError("TP too close")
order_type = mt5.ORDER_TYPE_BUY
-
- else: # short
+ else:
if sl <= market_price:
- raise RuntimeError(f"Invalid SL for short: sl={sl}, price={market_price}")
+ raise RuntimeError("Invalid SL for short")
if tp is not None and tp >= market_price:
- raise RuntimeError(f"Invalid TP for short: tp={tp}, price={market_price}")
+ raise RuntimeError("Invalid TP for short")
if (sl - market_price) < stops_level:
- raise RuntimeError(f"SL too close: {sl - market_price} < {stops_level}")
- if tp is not None and (market_price - tp) < stops_level:
- raise RuntimeError(f"TP too close: {market_price - tp} < {stops_level}")
+ raise RuntimeError("SL too close")
+ if tp and (market_price - tp) < stops_level:
+ raise RuntimeError("TP too close")
order_type = mt5.ORDER_TYPE_SELL
@@ -133,7 +139,7 @@ def close_position(
) -> None:
if self.dry_run:
- print(f"[DRY-RUN] CLOSE ticket={ticket} price={price}")
+ self.log.debug(f"[DRY-RUN] CLOSE ticket={ticket} price={price}")
return
positions = mt5.positions_get(ticket=int(ticket))
@@ -168,7 +174,7 @@ def close_position(
def close_partial(self, *, ticket: str, volume: float, price: float):
if self.dry_run:
- print(f"[DRY-RUN] PARTIAL CLOSE ticket={ticket} vol={volume} price={price}")
+ self.log.debug(f"[DRY-RUN] PARTIAL CLOSE ticket={ticket} vol={volume} price={price}")
return
request = {
@@ -187,7 +193,7 @@ def close_partial(self, *, ticket: str, volume: float, price: float):
def modify_sl(self, *, ticket: str, new_sl: float):
if self.dry_run:
- print(f"[DRY-RUN] MODIFY SL ticket={ticket} sl={new_sl}")
+ self.log.debug(f"[DRY-RUN] MODIFY SL ticket={ticket} sl={new_sl}")
return
request = {
@@ -206,7 +212,7 @@ def init_mt5(self):
raise RuntimeError(f"MT5 init failed: {mt5.last_error()}")
info = mt5.account_info()
- print(
+ self.log.debug(
"🟢 MT5 initialized | "
f"Account={info.login} "
f"Balance={info.balance} "
@@ -216,4 +222,4 @@ def init_mt5(self):
def shutdown(self):
if not self.dry_run:
mt5.shutdown()
- print("🔴 MT5 shutdown")
+ self.log.debug("🔴 MT5 shutdown")
diff --git a/core/live_trading/execution/position_manager.py b/core/live_trading/execution/position_manager.py
index 80f84f7..5230419 100644
--- a/core/live_trading/execution/position_manager.py
+++ b/core/live_trading/execution/position_manager.py
@@ -22,7 +22,7 @@ class PositionManager:
Responsibilities:
- consume TradePlan and execute entry
- - monitor active positions and perform exits (engine-managed)
+ - monitor active positions and perform exits
- keep repo in sync with broker
"""
@@ -31,6 +31,13 @@ def __init__(self, repo: TradeRepo, adapter: MT5Adapter):
self.adapter = adapter
self.state = TradeStateService(repo=repo, adapter=adapter)
+ # ==================================================
+ # Helpers
+ # ==================================================
+
+ def _is_dry_run(self) -> bool:
+ return bool(getattr(self.adapter, "dry_run", False))
+
# ==================================================
# ENTRY
# ==================================================
@@ -43,7 +50,11 @@ def on_trade_plan(self, *, plan: TradePlan, market_state: dict) -> None:
execution = ExitExecution.from_config(plan.strategy_config)
volume = self._compute_volume(plan=plan)
- params = trade_plan_to_mt5_order(plan=plan, volume=volume, execution=execution)
+ params = trade_plan_to_mt5_order(
+ plan=plan,
+ volume=volume,
+ execution=execution,
+ )
print(
f"📦 EXECUTING TRADE PLAN | {plan.symbol} {plan.direction} "
@@ -54,13 +65,15 @@ def on_trade_plan(self, *, plan: TradePlan, market_state: dict) -> None:
symbol=params.symbol,
direction=params.direction,
volume=params.volume,
- price=params.price,
sl=params.sl,
tp=params.tp,
)
- # Store execution policy with the trade so tick handler uses the same rules.
- # If your repo does not support extra payload, persist it inside the stored trade dict.
+ # ENTRY may be skipped (CLOSE-ONLY, DRY_RUN guard etc.)
+ if result is None:
+ print(f"⚠ ENTRY skipped for {plan.symbol}")
+ return
+
self.state.record_entry(
plan=plan,
exec_result=result,
@@ -77,8 +90,7 @@ def _compute_volume(self, *, plan: TradePlan) -> float:
sl=plan.exit_plan.sl,
max_risk=max_risk,
)
- norm = Mt5RiskParams.normalize_volume(plan.symbol, raw_volume)
- return norm
+ return Mt5RiskParams.normalize_volume(plan.symbol, raw_volume)
# ==================================================
# TICK LOOP
@@ -98,26 +110,46 @@ def on_tick(self, *, market_state: dict) -> None:
execution = self._get_execution(trade)
- if self._handle_managed_exit_signal(trade_id=trade_id, trade=trade, market_state=market_state, price=price, now=now):
+ if self._handle_managed_exit_signal(
+ trade_id=trade_id,
+ trade=trade,
+ market_state=market_state,
+ price=price,
+ now=now,
+ ):
continue
- # TP1 / BE (only if TP1 is engine-managed; otherwise repo should mark it from broker sync or you ignore TP1)
- if self._handle_tp1_and_be(trade_id=trade_id, trade=trade, execution=execution, price=price, now=now):
+ if self._handle_tp1_and_be(
+ trade_id=trade_id,
+ trade=trade,
+ execution=execution,
+ price=price,
+ now=now,
+ ):
continue
- self._handle_trailing_sl(trade_id=trade_id, trade=trade, market_state=market_state)
+ self._handle_trailing_sl(
+ trade_id=trade_id,
+ trade=trade,
+ market_state=market_state,
+ )
- if self._handle_engine_exit(trade_id=trade_id, trade=trade, execution=execution, price=price, now=now):
- continue
+ self._handle_engine_exit(
+ trade_id=trade_id,
+ trade=trade,
+ execution=execution,
+ price=price,
+ now=now,
+ )
# ==================================================
- # Tick handlers (small, testable pieces)
+ # Tick handlers
# ==================================================
def _handle_broker_sync(self, *, trade_id: str, trade: dict, now: datetime) -> bool:
- """
- If broker closed the position (e.g. broker TP2), sync repo and stop processing.
- """
+ if self._is_dry_run():
+ return False
+
positions = mt5.positions_get(ticket=int(trade["ticket"]))
if positions:
return False
@@ -150,7 +182,13 @@ def _handle_managed_exit_signal(
and signal_exit.get("direction") == "close"
):
print(f"🚪 MANAGED EXIT for {trade_id}")
- self.adapter.close_position(ticket=trade["ticket"], price=price)
+
+ if not self._is_dry_run():
+ self.adapter.close_position(
+ ticket=trade["ticket"],
+ price=price,
+ )
+
self.state.record_exit(
trade_id=trade_id,
price=price,
@@ -171,36 +209,36 @@ def _handle_tp1_and_be(
price: float,
now: datetime,
) -> bool:
- """
- TP1 partial close and optional SL->BE.
-
- Rules:
- - If TP1 is DISABLED -> do nothing.
- - If TP1 is BROKER -> do not do partial close (broker closes full; partial isn't supported that way).
- (You can still do BE_ON_TP1 if you detect price reached tp1; that's optional and controlled below.)
- - If TP1 is ENGINE -> run partial close logic.
- """
if trade.get("tp1_executed"):
return False
if execution.tp1 == "DISABLED":
return False
- # Detect TP1 reached (price-based)
if not LiveExitRules.check_tp1_hit(trade=trade, price=price):
return False
if execution.tp1 == "ENGINE":
- self._execute_tp1_partial(trade_id=trade_id, trade=trade, price=price, now=now)
+ self._execute_tp1_partial(
+ trade_id=trade_id,
+ trade=trade,
+ price=price,
+ now=now,
+ )
- # Move SL -> BE when TP1 is reached (applies for ENGINE or BROKER TP1 if you want)
if execution.be_on_tp1:
self._try_move_sl_to_be(trade_id=trade_id, trade=trade)
- # If we just executed TP1 partial, we end tick processing for that trade
return execution.tp1 == "ENGINE"
- def _execute_tp1_partial(self, *, trade_id: str, trade: dict, price: float, now: datetime) -> None:
+ def _execute_tp1_partial(
+ self,
+ *,
+ trade_id: str,
+ trade: dict,
+ price: float,
+ now: datetime,
+ ) -> None:
total_vol = float(trade["volume"])
cfg = trade.get("strategy_config", {})
close_ratio = float(cfg.get("TP1_CLOSE_RATIO", 0.5))
@@ -212,7 +250,12 @@ def _execute_tp1_partial(self, *, trade_id: str, trade: dict, price: float, now:
print(f"🎯 TP1 PARTIAL CLOSE {trade_id}: {close_vol}/{total_vol}")
- self.adapter.close_partial(ticket=trade["ticket"], volume=close_vol, price=price)
+ if not self._is_dry_run():
+ self.adapter.close_partial(
+ ticket=trade["ticket"],
+ volume=close_vol,
+ price=price,
+ )
self.state.mark_tp1_executed(
trade_id=trade_id,
@@ -226,7 +269,10 @@ def _try_move_sl_to_be(self, *, trade_id: str, trade: dict) -> None:
current_sl = float(trade["sl"])
direction = trade["direction"]
- already_be = (direction == "long" and current_sl >= entry) or (direction == "short" and current_sl <= entry)
+ already_be = (
+ (direction == "long" and current_sl >= entry)
+ or (direction == "short" and current_sl <= entry)
+ )
if already_be:
return
@@ -234,7 +280,13 @@ def _try_move_sl_to_be(self, *, trade_id: str, trade: dict) -> None:
self.state.update_sl(trade_id=trade_id, new_sl=entry)
self.state.set_flag(trade_id=trade_id, key="be_moved", value=True)
- def _handle_trailing_sl(self, *, trade_id: str, trade: dict, market_state: dict) -> None:
+ def _handle_trailing_sl(
+ self,
+ *,
+ trade_id: str,
+ trade: dict,
+ market_state: dict,
+ ) -> None:
cfg = trade.get("strategy_config", {})
if not cfg.get("USE_TRAILING"):
return
@@ -255,7 +307,10 @@ def _handle_trailing_sl(self, *, trade_id: str, trade: dict, market_state: dict)
current_sl = float(trade["sl"])
direction = trade["direction"]
- improved = (direction == "long" and candidate > current_sl) or (direction == "short" and candidate < current_sl)
+ improved = (
+ (direction == "long" and candidate > current_sl)
+ or (direction == "short" and candidate < current_sl)
+ )
if not improved:
return
@@ -271,23 +326,16 @@ def _handle_engine_exit(
price: float,
now: datetime,
) -> bool:
- """
- Engine-managed exits:
- - SL is always engine-managed (we close if price crosses SL)
- - TP2 can be engine-managed or broker-managed
- """
- # If TP2 is broker-managed, we still allow engine check as a safety net.
exit_res = LiveExitRules.check_exit(trade=trade, price=price, now=now)
if exit_res is None:
return False
- # Respect TP2 executor if you want strict behavior:
- if exit_res.reason == "TP2" and execution.tp2 != "ENGINE":
- # broker should close it; keep it as safety net? choose:
- # return False # strict
- pass # permissive safety net (default)
+ if not self._is_dry_run():
+ self.adapter.close_position(
+ ticket=trade["ticket"],
+ price=exit_res.exit_price,
+ )
- self.adapter.close_position(ticket=trade["ticket"], price=exit_res.exit_price)
self.state.record_exit(
trade_id=trade_id,
price=exit_res.exit_price,
@@ -298,12 +346,8 @@ def _handle_engine_exit(
return True
def _get_execution(self, trade: dict) -> ExitExecution:
- """
- Prefer persisted execution policy from repo; fallback to strategy_config.
- """
raw = trade.get("exit_execution")
if isinstance(raw, dict):
- # reconstruct via config to keep defaults consistent
return ExitExecution.from_config({"EXIT_EXECUTION": raw})
cfg = trade.get("strategy_config", {})
diff --git a/core/live_trading/logging.py b/core/live_trading/logging.py
new file mode 100644
index 0000000..5a3ca77
--- /dev/null
+++ b/core/live_trading/logging.py
@@ -0,0 +1,21 @@
+from core.logging.run_logger import RunLogger
+from core.logging.config import LoggerConfig
+from core.logging.prefix import LOG_PREFIX
+
+
+def create_live_logger(symbol: str) -> RunLogger:
+ cfg = LoggerConfig(
+ stdout=True,
+ file=False,
+ timing=False,
+ profiling=False,
+ )
+
+ return (
+ RunLogger(
+ name="live",
+ cfg=cfg,
+ prefix=LOG_PREFIX["LIVE"],
+ )
+ .with_context(symbol=symbol)
+ )
\ No newline at end of file
diff --git a/core/live_trading/market_state.py b/core/live_trading/market_state.py
new file mode 100644
index 0000000..51f26c1
--- /dev/null
+++ b/core/live_trading/market_state.py
@@ -0,0 +1,20 @@
+from abc import ABC, abstractmethod
+from typing import Optional, Dict, Any
+
+
+class MarketStateProvider(ABC):
+ """
+ Provides market state events for the LiveEngine.
+ """
+
+ @abstractmethod
+ def poll(self) -> Optional[Dict[str, Any]]:
+ """
+ Returns:
+ {
+ "price": float,
+ "time": datetime,
+ "candle_time": datetime | None
+ }
+ """
+ ...
\ No newline at end of file
diff --git a/core/live_trading/mt5_market_state.py b/core/live_trading/mt5_market_state.py
new file mode 100644
index 0000000..2546124
--- /dev/null
+++ b/core/live_trading/mt5_market_state.py
@@ -0,0 +1,38 @@
+import MetaTrader5 as mt5
+import pandas as pd
+from datetime import datetime
+
+from core.live_trading.market_state import MarketStateProvider
+from core.utils.timeframe import MT5_TIMEFRAME_MAP
+
+
+class MT5MarketStateProvider(MarketStateProvider):
+ """
+ Polls MT5 for last closed candle.
+ """
+
+ def __init__(self, *, symbol: str, timeframe: str):
+ self.symbol = symbol
+ self.timeframe = timeframe
+ self._last_candle_time = None
+
+ def poll(self):
+ tf = MT5_TIMEFRAME_MAP[self.timeframe]
+ rates = mt5.copy_rates_from_pos(self.symbol, tf, 0, 2)
+
+ if rates is None or len(rates) < 2:
+ return None
+
+ last_closed = rates[-2]
+ candle_time = pd.to_datetime(
+ last_closed["time"], unit="s", utc=True
+ )
+
+ is_new_candle = candle_time != self._last_candle_time
+ self._last_candle_time = candle_time
+
+ return {
+ "price": float(last_closed["close"]),
+ "time": candle_time,
+ "candle_time": candle_time if is_new_candle else None,
+ }
\ No newline at end of file
diff --git a/core/live_trading/run_trading.py b/core/live_trading/run_trading.py
index 958bdca..69806ec 100644
--- a/core/live_trading/run_trading.py
+++ b/core/live_trading/run_trading.py
@@ -5,183 +5,95 @@
lookback_to_bars,
MT5Client,
)
+from core.data_provider.providers.live_provider import LiveStrategyDataProvider
from core.live_trading.engine import LiveEngine
-from core.live_trading.strategy_adapter import LiveStrategyAdapter
-from core.live_trading.execution import PositionManager
-from core.live_trading.execution import MT5Adapter
+from core.live_trading.execution.mt5_adapter import MT5Adapter
+from core.live_trading.execution.position_manager import PositionManager
+from core.live_trading.logging import create_live_logger
+from core.live_trading.mt5_market_state import MT5MarketStateProvider
+from core.live_trading.strategy_runner import LiveStrategyRunner
+
from core.live_trading.trade_repo import TradeRepo
from core.live_trading.strategy_loader import load_strategy_class
-from core.utils.lookback import LOOKBACK_CONFIG, MIN_HTF_BARS
-from core.utils.timeframe import MT5_TIMEFRAME_MAP
+from core.logging.config import LoggerConfig
+from core.logging.prefix import LOG_PREFIX
+from core.logging.run_logger import RunLogger
+from core.utils.lookback import LOOKBACK_CONFIG
class LiveTradingRunner:
"""
- MT5 live trading runner.
- Symmetric API to BacktestRunner.
+ Live trading application runner.
"""
def __init__(self, cfg):
self.cfg = cfg
+ self.log = create_live_logger(cfg.SYMBOLS)
- self.engine = None
- self.strategy = None
- self.provider = None
-
- # ==================================================
- # 1️⃣ MT5 INIT
- # ==================================================
+ def run(self):
+ self.log.info("starting live trading runner")
- def _init_mt5(self):
if not mt5.initialize():
- raise RuntimeError(f"MT5 init failed: {mt5.last_error()}")
-
- if not mt5.symbol_select(self.cfg.SYMBOLS, True):
- raise RuntimeError(
- f"Symbol not available: {self.cfg.SYMBOLS}"
- )
-
- info = mt5.account_info()
- print(
- f"🟢 MT5 connected | "
- f"Account={info.login} "
- f"Balance={info.balance}"
- )
-
- # ==================================================
- # 2️⃣ INITIAL DATA (WARMUP)
- # ==================================================
-
- def _load_initial_data(self) -> pd.DataFrame:
- tf = MT5_TIMEFRAME_MAP[self.cfg.TIMEFRAME]
- lookback = LOOKBACK_CONFIG[self.cfg.TIMEFRAME]
- bars = lookback_to_bars(self.cfg.TIMEFRAME, lookback)
-
- rates = mt5.copy_rates_from_pos(
- self.cfg.SYMBOLS, tf, 0, bars
- )
-
- if rates is None or len(rates) == 0:
- raise RuntimeError("Initial MT5 data fetch failed")
-
- df = pd.DataFrame(rates)
- df["time"] = pd.to_datetime(df["time"], unit="s", utc=True)
+ self.log.error("MT5 init failed")
+ raise RuntimeError("MT5 init failed")
- print(
- f"📦 Warmup loaded | "
- f"{len(df)} candles ({self.cfg.TIMEFRAME})"
+ mt5.symbol_select(self.cfg.SYMBOLS, True)
+ self.log.info(
+ "MT5 initialized",
)
- return df
-
- # ==================================================
- # 3️⃣ INFORMATIVE PROVIDER
- # ==================================================
-
- def _build_provider(self) -> MT5Client:
- StrategyClass = load_strategy_class(
- self.cfg.STRATEGY_CLASS
- )
+ StrategyClass = load_strategy_class(self.cfg.STRATEGY_CLASS)
bars_per_tf = {}
- for tf in StrategyClass.get_required_informatives():
+ for tf in [self.cfg.TIMEFRAME] + StrategyClass.get_required_informatives():
lookback = LOOKBACK_CONFIG[tf]
- bars_per_tf[tf] = max(
- lookback_to_bars(tf, lookback),
- MIN_HTF_BARS.get(tf, 0),
- )
+ bars_per_tf[tf] = lookback_to_bars(tf, lookback)
- provider = MT5Client(bars_per_tf=bars_per_tf)
-
- return provider
+ self.log.debug(
+ "bars_per_tf built",
+ # jeśli chcesz, możesz to dać do contextu
+ )
- # ==================================================
- # 4️⃣ STRATEGY
- # ==================================================
+ client = MT5Client()
+ data_provider = LiveStrategyDataProvider(
+ client=client,
+ bars_per_tf=bars_per_tf,
+ )
- def _build_strategy(self, df_execution: pd.DataFrame):
- self.strategy = load_strategy_class(self.cfg.STRATEGY_CLASS)(
- df=df_execution,
+ strategy = StrategyClass(
+ df=None,
symbol=self.cfg.SYMBOLS,
startup_candle_count=self.cfg.STARTUP_CANDLE_COUNT,
)
- self.strategy.validate()
-
- # ==================================================
- # 5️⃣ ENGINE
- # ==================================================
-
- def _build_engine(self):
-
- adapter = MT5Adapter(dry_run=self.cfg.DRY_RUN)
- repo = TradeRepo()
- pm = PositionManager(repo=repo, adapter=adapter)
-
- strategy_adapter = LiveStrategyAdapter(
- strategy=self.strategy,
- provider=self.provider,
+ strategy_runner = LiveStrategyRunner(
+ strategy=strategy,
+ data_provider=data_provider,
symbol=self.cfg.SYMBOLS,
- startup_candle_count=self.cfg.STARTUP_CANDLE_COUNT,
- df_execution=self.df_execution,
)
- tf = MT5_TIMEFRAME_MAP[self.cfg.TIMEFRAME]
-
- def market_data_provider():
- rates = mt5.copy_rates_from_pos(
- self.cfg.SYMBOLS, tf, 0, 2
- )
- if rates is None or len(rates) < 2:
- return None
-
- last_closed = rates[-2]
-
- candle_ts = pd.to_datetime(
- last_closed["time"], unit="s", utc=True
- )
+ market_state_provider = MT5MarketStateProvider(
+ symbol=self.cfg.SYMBOLS,
+ timeframe=self.cfg.TIMEFRAME,
+ )
- return {
- "price": float(last_closed["close"]),
- "time": candle_ts, # logical event time
- "candle_time": candle_ts # SAME TYPE, SAME MEANING
- }
+ adapter = MT5Adapter(
+ dry_run=self.cfg.DRY_RUN,
+ log=self.log.with_context(component="adapter"),
+ )
+ repo = TradeRepo()
+ pm = PositionManager(repo=repo, adapter=adapter)
- self.engine = LiveEngine(
+ engine = LiveEngine(
position_manager=pm,
- market_data_provider=market_data_provider,
- strategy_adapter=strategy_adapter,
+ market_state_provider=market_state_provider,
+ strategy_runner=strategy_runner,
tick_interval_sec=self.cfg.TICK_INTERVAL_SEC,
)
- print("⚙️ LiveEngine ready")
-
- # ==================================================
- # 6️⃣ RUN
- # ==================================================
-
- def run(self):
- print("🚀 LiveTradingRunner start")
-
- self._init_mt5()
- self.df_execution = self._load_initial_data()
- self.provider = self._build_provider()
-
- self._build_strategy(self.df_execution)
- self._build_engine()
-
- print(
- f"🚀 LIVE TRADING STARTED | "
- f"{self.cfg.SYMBOLS} {self.cfg.TIMEFRAME} "
- f"DRY_RUN={self.cfg.DRY_RUN}"
- )
-
- self.engine.start()
-
- # ==================================================
- # 7️⃣ SHUTDOWN
- # ==================================================
+ self.log.with_context(
+ timeframe=self.cfg.TIMEFRAME,
+ dry_run=self.cfg.DRY_RUN,
+ ).info("LIVE STARTED")
- def shutdown(self):
- mt5.shutdown()
- print("🔴 MT5 shutdown")
\ No newline at end of file
+ engine.start()
\ No newline at end of file
diff --git a/core/live_trading/strategy_adapter.py b/core/live_trading/strategy_adapter.py
deleted file mode 100644
index e70f115..0000000
--- a/core/live_trading/strategy_adapter.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from typing import Any, Optional
-
-import pandas as pd
-
-from core.strategy.orchestration.strategy_execution import execute_strategy
-from core.strategy.plan_builder import PlanBuildContext
-from core.strategy.trade_plan import TradePlan
-
-
-@dataclass(frozen=True)
-class StrategyCandleResult:
- df_plot: pd.DataFrame
- last_row: pd.Series
- plan: TradePlan | None
-
-
-class LiveStrategyAdapter:
- """
- Live orchestration adapter.
-
- Responsibilities:
- - execute vector strategy update on df_execution
- - produce entry plan from last row using BaseStrategy default builder
- """
-
- def __init__(
- self,
- *,
- strategy,
- provider,
- symbol: str,
- startup_candle_count: int,
- df_execution: pd.DataFrame,
- ):
- self.strategy = strategy
- self.provider = provider
- self.symbol = symbol
- self.startup_candle_count = startup_candle_count
- self.df_execution = df_execution
-
- self._last_df_plot: Optional[pd.DataFrame] = None
-
- def on_new_candle(self) -> StrategyCandleResult:
- df_plot = execute_strategy(
- strategy=self.strategy,
- df=self.df_execution,
- provider=self.provider,
- symbol=self.symbol,
- startup_candle_count=self.startup_candle_count,
- )
-
- if df_plot is None or df_plot.empty:
- # hard guard: strategy must return df
- raise RuntimeError("execute_strategy returned empty df")
-
- last_row = df_plot.iloc[-1]
-
- ctx = PlanBuildContext(
- symbol=self.symbol,
- strategy_name=type(self.strategy).__name__,
- strategy_config=getattr(self.strategy, "strategy_config", {}) or {},
- )
-
- # Expect BaseStrategy provides this default method.
- build_fn = getattr(self.strategy, "build_trade_plan_live", None)
- plan = build_fn(row=last_row, ctx=ctx) if callable(build_fn) else None
-
- self._last_df_plot = df_plot
- return StrategyCandleResult(df_plot=df_plot, last_row=last_row, plan=plan)
-
- def last_df_plot(self) -> Optional[pd.DataFrame]:
- return self._last_df_plot
\ No newline at end of file
diff --git a/core/live_trading/strategy_runner.py b/core/live_trading/strategy_runner.py
new file mode 100644
index 0000000..44086b7
--- /dev/null
+++ b/core/live_trading/strategy_runner.py
@@ -0,0 +1,72 @@
+from __future__ import annotations
+
+from typing import Optional
+
+import pandas as pd
+
+from core.strategy.orchestration.informatives import apply_informatives
+from core.strategy.plan_builder import PlanBuildContext
+from core.utils.timeframe import tf_to_minutes
+
+
+
+
+class StrategyCandleResult:
+ def __init__(self, *, last_row: pd.Series, plan):
+ self.last_row = last_row
+ self.plan = plan
+
+
+class LiveStrategyRunner:
+ """
+ Runs strategy on each new candle.
+ """
+
+ def __init__(
+ self,
+ *,
+ strategy,
+ data_provider,
+ symbol: str,
+ ):
+ self.strategy = strategy
+ self.data_provider = data_provider
+ self.symbol = symbol
+ self._last_df: Optional[pd.DataFrame] = None
+
+ def run(self) -> StrategyCandleResult:
+ data_by_tf = self.data_provider.fetch(self.symbol)
+
+ base_tf = min(data_by_tf.keys(), key=tf_to_minutes)
+ df_base = data_by_tf[base_tf]
+
+ df_context = apply_informatives(
+ df=df_base,
+ strategy=self.strategy,
+ data_by_tf=data_by_tf,
+ )
+
+ self.strategy.df = df_context
+ self.strategy.populate_indicators()
+ self.strategy.populate_entry_trend()
+ self.strategy.populate_exit_trend()
+
+ last_row = df_context.iloc[-1]
+
+ ctx = PlanBuildContext(
+ symbol=self.symbol,
+ strategy_name=self.strategy.get_strategy_name(),
+ strategy_config=self.strategy.strategy_config,
+ )
+
+ plan = self.strategy.build_trade_plan_live(
+ row=last_row,
+ ctx=ctx,
+ )
+
+ self._last_df = df_context
+
+ return StrategyCandleResult(
+ last_row=last_row,
+ plan=plan,
+ )
\ No newline at end of file
diff --git a/core/live_trading/tests/conftest.py b/core/live_trading/tests/conftest.py
new file mode 100644
index 0000000..efd5a4c
--- /dev/null
+++ b/core/live_trading/tests/conftest.py
@@ -0,0 +1,6 @@
+import pytest
+from datetime import datetime, timezone
+
+@pytest.fixture
+def fixed_now():
+ return datetime(2025, 1, 1, 12, 0, tzinfo=timezone.utc)
\ No newline at end of file
diff --git a/core/live_trading/tests/live_state/active_trades.json b/core/live_trading/tests/live_state/active_trades.json
deleted file mode 100644
index 9e26dfe..0000000
--- a/core/live_trading/tests/live_state/active_trades.json
+++ /dev/null
@@ -1 +0,0 @@
-{}
\ No newline at end of file
diff --git a/core/live_trading/tests/live_state/closed_trades.json b/core/live_trading/tests/live_state/closed_trades.json
deleted file mode 100644
index f0fb124..0000000
--- a/core/live_trading/tests/live_state/closed_trades.json
+++ /dev/null
@@ -1,142 +0,0 @@
-{
- "369957004": {
- "trade_id": 369957004,
- "symbol": "BTCUSD",
- "direction": "long",
- "entry_price": 95270.97,
- "volume": 0.0,
- "sl": 95260.3700304329,
- "tp1": 95281.5699695671,
- "tp2": 95292.16993913421,
- "tp1_executed": false,
- "entry_time": "2026-01-17T20:20:04.675410",
- "entry_tag": "long_trend1",
- "strategy": "Hts",
- "strategy_config": {},
- "ticket": 369957004,
- "exit_price": 95292.16993913421,
- "exit_time": "2026-01-17 20:26:47.837174",
- "exit_reason": "BROKER_CLOSED",
- "exit_level_tag": "TP2_live"
- },
- "369957133": {
- "trade_id": 369957133,
- "symbol": "BTCUSD",
- "direction": "long",
- "entry_price": 95293.39,
- "volume": 0.0,
- "sl": 95281.19661899896,
- "tp1": 95305.58338100104,
- "tp2": 95317.77676200207,
- "tp1_executed": false,
- "entry_time": "2026-01-17T20:26:47.837174",
- "entry_tag": "long_trend1",
- "strategy": "Hts",
- "strategy_config": {},
- "ticket": 369957133,
- "exit_price": 95317.77676200207,
- "exit_time": "2026-01-17 20:28:56.273059",
- "exit_reason": "BROKER_CLOSED",
- "exit_level_tag": "TP2_live"
- },
- "369957207": {
- "trade_id": 369957207,
- "symbol": "BTCUSD",
- "direction": "short",
- "entry_price": 95288.48,
- "volume": 0.0,
- "sl": 95307.13210570674,
- "tp1": 95269.82789429325,
- "tp2": 95251.17578858651,
- "tp1_executed": false,
- "entry_time": "2026-01-17T20:29:14.113469",
- "entry_tag": "short_trend1",
- "strategy": "Hts",
- "strategy_config": {},
- "ticket": 369957207,
- "exit_price": 95251.17578858651,
- "exit_time": "2026-01-17 20:32:20.897390",
- "exit_reason": "BROKER_CLOSED",
- "exit_level_tag": "TP2_live"
- },
- "369957531": {
- "trade_id": 369957531,
- "symbol": "BTCUSD",
- "direction": "short",
- "entry_price": 95256.22,
- "volume": 0.0,
- "sl": 95271.83845894576,
- "tp1": 95240.60154105425,
- "tp2": 95224.98308210849,
- "tp1_executed": false,
- "entry_time": "2026-01-17T20:39:40.342580",
- "entry_tag": "short_trend1",
- "strategy": "Hts",
- "strategy_config": {},
- "ticket": 369957531,
- "exit_price": 95224.98308210849,
- "exit_time": "2026-01-17 20:42:23.287946",
- "exit_reason": "BROKER_CLOSED",
- "exit_level_tag": "TP2_live"
- },
- "369957556": {
- "trade_id": 369957556,
- "symbol": "BTCUSD",
- "direction": "long",
- "entry_price": 95264.67,
- "volume": 0.0,
- "sl": 95250.8447396852,
- "tp1": 95278.4952603148,
- "tp2": 95292.3205206296,
- "tp1_executed": false,
- "entry_time": "2026-01-17T20:42:23.287946",
- "entry_tag": "long_trend1",
- "strategy": "Hts",
- "strategy_config": {},
- "ticket": 369957556,
- "exit_price": 95292.3205206296,
- "exit_time": "2026-01-17 20:44:14.323056",
- "exit_reason": "BROKER_CLOSED",
- "exit_level_tag": "TP2_live"
- },
- "369957774": {
- "trade_id": 369957774,
- "symbol": "BTCUSD",
- "direction": "long",
- "entry_price": 95270.41,
- "volume": 0.0,
- "sl": 95257.81345265859,
- "tp1": 95283.00654734141,
- "tp2": 95295.60309468283,
- "tp1_executed": false,
- "entry_time": "2026-01-17T20:44:14.323056",
- "entry_tag": "long_trend1",
- "strategy": "Hts",
- "strategy_config": {},
- "ticket": 369957774,
- "exit_price": 95295.60309468283,
- "exit_time": "2026-01-17 20:45:11.186981",
- "exit_reason": "BROKER_CLOSED",
- "exit_level_tag": "TP2_live"
- },
- "369958228": {
- "trade_id": 369958228,
- "symbol": "BTCUSD",
- "direction": "long",
- "entry_price": 95325.68,
- "volume": 0.0,
- "sl": 95243.75358716586,
- "tp1": 95407.60641283412,
- "tp2": 95489.53282566826,
- "tp1_executed": false,
- "entry_time": "2026-01-17T20:51:02.265531",
- "entry_tag": "long_trend1",
- "strategy": "Hts",
- "strategy_config": {},
- "ticket": 369958228,
- "exit_price": 95489.53282566826,
- "exit_time": "2026-01-18 11:22:00+00:00",
- "exit_reason": "BROKER_CLOSED",
- "exit_level_tag": "TP2_live"
- }
-}
\ No newline at end of file
diff --git a/core/live_trading/tests/mocks.py b/core/live_trading/tests/mocks.py
deleted file mode 100644
index 286c844..0000000
--- a/core/live_trading/tests/mocks.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from datetime import datetime
-
-
-def market_data_provider_mock():
- return {
- "price": 1.1000,
- "time": datetime.utcnow(),
- }
-
-
-def signal_provider_mock_once():
- """
- Zwraca jeden sygnał, potem pustą listę
- """
- yielded = {"done": False}
-
- def _provider():
- if yielded["done"]:
- return []
- yielded["done"] = True
- return [{
- "symbol": "EURUSD",
- "direction": "long",
- "volume": 0.1,
- "entry_price": 1.1000,
- "sl": 1.0950,
- "tp1": 1.1020,
- "tp2": 1.1050,
- "entry_tag": "entry_test",
- }]
-
- return _provider
diff --git a/core/live_trading/tests/test_calculate_volume.py b/core/live_trading/tests/test_calculate_volume.py
new file mode 100644
index 0000000..055bce4
--- /dev/null
+++ b/core/live_trading/tests/test_calculate_volume.py
@@ -0,0 +1,22 @@
+from core.live_trading.execution.risk.sizing import LiveSizer
+
+
+def test_calculate_volume_positive(mocker):
+ mocker.patch(
+ "core.live_trading.execution.risk.sizing.LiveSizer.get_account_size",
+ return_value=10_000,
+ )
+
+ mocker.patch(
+ "core.live_trading.execution.risk.sizing.Mt5RiskParams.get_symbol_risk_params",
+ return_value=(0.0001, 10),
+ )
+
+ vol = LiveSizer.calculate_volume(
+ symbol="EURUSD",
+ entry_price=1.2000,
+ sl=1.1900,
+ max_risk=0.01,
+ )
+
+ assert vol > 0
\ No newline at end of file
diff --git a/core/live_trading/tests/test_full_flow.py b/core/live_trading/tests/test_full_flow.py
new file mode 100644
index 0000000..80708e9
--- /dev/null
+++ b/core/live_trading/tests/test_full_flow.py
@@ -0,0 +1,25 @@
+from core.live_trading.engine import LiveEngine
+
+
+def test_live_engine_full_flow(mocker, fixed_now):
+ market = mocker.Mock()
+ market.poll.return_value = {
+ "price": 100,
+ "time": fixed_now,
+ "candle_time": fixed_now,
+ }
+
+ pm = mocker.Mock()
+ runner = mocker.Mock()
+ runner.run.return_value = mocker.Mock(plan="PLAN", last_row={})
+
+ engine = LiveEngine(
+ position_manager=pm,
+ market_state_provider=market,
+ strategy_runner=runner,
+ tick_interval_sec=0,
+ )
+
+ engine._tick()
+
+ pm.on_trade_plan.assert_called_once()
\ No newline at end of file
diff --git a/core/live_trading/tests/test_live_engine.py b/core/live_trading/tests/test_live_engine.py
new file mode 100644
index 0000000..bb6d561
--- /dev/null
+++ b/core/live_trading/tests/test_live_engine.py
@@ -0,0 +1,67 @@
+from core.live_trading.engine import LiveEngine
+
+
+def test_live_engine_calls_on_tick_every_loop(mocker, fixed_now):
+ market = mocker.Mock()
+ market.poll.return_value = {
+ "price": 100.0,
+ "time": fixed_now,
+ "candle_time": None,
+ }
+
+ pm = mocker.Mock()
+ runner = mocker.Mock()
+
+ engine = LiveEngine(
+ position_manager=pm,
+ market_state_provider=market,
+ strategy_runner=runner,
+ tick_interval_sec=0,
+ )
+
+ engine._tick()
+
+ pm.on_tick.assert_called_once()
+ runner.run.assert_not_called()
+
+def test_strategy_runs_only_on_new_candle(mocker, fixed_now):
+ market = mocker.Mock()
+ market.poll.return_value = {
+ "price": 100.0,
+ "time": fixed_now,
+ "candle_time": fixed_now,
+ }
+
+ pm = mocker.Mock()
+ runner = mocker.Mock()
+ runner.run.return_value = mocker.Mock(plan=None, last_row={})
+
+ engine = LiveEngine(
+ position_manager=pm,
+ market_state_provider=market,
+ strategy_runner=runner,
+ tick_interval_sec=0,
+ )
+
+ engine._tick()
+
+ runner.run.assert_called_once()
+
+def test_engine_skips_when_market_state_none(mocker):
+ market = mocker.Mock()
+ market.poll.return_value = None
+
+ pm = mocker.Mock()
+ runner = mocker.Mock()
+
+ engine = LiveEngine(
+ position_manager=pm,
+ market_state_provider=market,
+ strategy_runner=runner,
+ tick_interval_sec=0,
+ )
+
+ engine._tick()
+
+ pm.on_tick.assert_not_called()
+ runner.run.assert_not_called()
\ No newline at end of file
diff --git a/core/live_trading/tests/test_position_manager.py b/core/live_trading/tests/test_position_manager.py
new file mode 100644
index 0000000..8aba13c
--- /dev/null
+++ b/core/live_trading/tests/test_position_manager.py
@@ -0,0 +1,71 @@
+from core.live_trading.execution.position_manager import PositionManager
+
+
+def test_position_manager_blocks_duplicate_entry(mocker):
+ repo = mocker.Mock()
+ repo.load_active.return_value = {"1": {"symbol": "EURUSD"}}
+
+ adapter = mocker.Mock()
+ pm = PositionManager(repo=repo, adapter=adapter)
+
+ plan = mocker.Mock(symbol="EURUSD")
+
+ pm.on_trade_plan(plan=plan, market_state={})
+
+ adapter.open_position.assert_not_called()
+
+def test_exit_on_sl(mocker, fixed_now):
+ repo = mocker.Mock()
+ repo.load_active.return_value = {
+ "1": {
+ "trade_id": "1",
+ "direction": "long",
+ "sl": 99.0,
+ "tp2": None,
+ "entry_time": fixed_now,
+ "ticket": "1",
+ }
+ }
+
+ adapter = mocker.Mock(dry_run=True)
+ pm = PositionManager(repo=repo, adapter=adapter)
+
+ pm.on_tick(
+ market_state={
+ "price": 98.0,
+ "time": fixed_now,
+ }
+ )
+
+ repo.record_exit.assert_called_once()
+
+def test_tp1_partial_and_be(mocker, fixed_now):
+ repo = mocker.Mock()
+ repo.load_active.return_value = {
+ "1": {
+ "trade_id": "1",
+ "direction": "long",
+ "entry_price": 100,
+ "sl": 95,
+ "tp1": 105,
+ "tp1_executed": False,
+ "volume": 1.0,
+ "ticket": "1",
+ "strategy_config": {"TP1_CLOSE_RATIO": 0.5},
+ }
+ }
+
+ adapter = mocker.Mock(dry_run=True)
+ pm = PositionManager(repo=repo, adapter=adapter)
+
+    # NOTE: replace pm.state with a mock so the TP1 transition can be asserted
+ pm.state = mocker.Mock()
+
+ pm.on_tick(
+ market_state={
+ "price": 106,
+ "time": fixed_now,
+ }
+ )
+
+ pm.state.mark_tp1_executed.assert_called_once()
\ No newline at end of file
diff --git a/core/live_trading/tests/test_strategy_run.py b/core/live_trading/tests/test_strategy_run.py
new file mode 100644
index 0000000..037b78d
--- /dev/null
+++ b/core/live_trading/tests/test_strategy_run.py
@@ -0,0 +1,54 @@
+from datetime import datetime, timezone
+from unittest.mock import Mock
+
+import pandas as pd
+
+from core.live_trading.strategy_runner import LiveStrategyRunner
+
+
+class DummyStrategy:
+ def __init__(self):
+ self.df = None
+ self.strategy_config = {}
+
+ def get_required_informatives(self):
+ return []
+
+ def populate_indicators(self): pass
+ def populate_entry_trend(self): pass
+ def populate_exit_trend(self): pass
+
+ def get_strategy_name(self):
+ return "dummy"
+
+ def build_trade_plan_live(self, row, ctx):
+ return "PLAN"
+
+
+
+def test_live_strategy_runner_returns_last_row_and_plan():
+ df = pd.DataFrame(
+ {
+ "time": [
+ datetime(2025, 1, 1, 12, 0, tzinfo=timezone.utc),
+ datetime(2025, 1, 1, 12, 5, tzinfo=timezone.utc),
+ ],
+ "signal": ["long", "long"],
+ }
+ )
+
+ provider = Mock()
+ provider.fetch.return_value = {"M5": df}
+
+ strategy = DummyStrategy()
+
+ runner = LiveStrategyRunner(
+ strategy=strategy,
+ data_provider=provider,
+ symbol="EURUSD",
+ )
+
+ result = runner.run()
+
+ assert result.plan == "PLAN"
+ assert result.last_row["signal"] == "long"
\ No newline at end of file
diff --git a/core/live_trading/tests/test_trade_repo.py b/core/live_trading/tests/test_trade_repo.py
new file mode 100644
index 0000000..f9978dd
--- /dev/null
+++ b/core/live_trading/tests/test_trade_repo.py
@@ -0,0 +1,25 @@
+from datetime import datetime
+
+from core.live_trading.trade_repo import TradeRepo
+
+
+def test_trade_repo_is_restart_safe(tmp_path):
+ repo = TradeRepo(data_dir=tmp_path)
+
+ repo.record_entry(
+ trade_id="1",
+ symbol="EURUSD",
+ direction="long",
+ entry_price=100,
+ volume=1,
+ sl=95,
+ tp1=105,
+ tp2=110,
+ entry_time=datetime.utcnow(),
+ entry_tag="test",
+ )
+
+ repo2 = TradeRepo(data_dir=tmp_path)
+
+ active = repo2.load_active()
+ assert "1" in active
\ No newline at end of file
diff --git a/core/live_trading/tests/test_trade_state.py b/core/live_trading/tests/test_trade_state.py
new file mode 100644
index 0000000..d7c34e1
--- /dev/null
+++ b/core/live_trading/tests/test_trade_state.py
@@ -0,0 +1,27 @@
+from core.live_trading.execution.live.trade_state_service import TradeStateService
+
+
+def test_has_active_position_true(mocker):
+ repo = mocker.Mock()
+ repo.load_active.return_value = {
+ "1": {"symbol": "EURUSD"}
+ }
+
+ svc = TradeStateService(repo=repo, adapter=mocker.Mock())
+
+ assert svc.has_active_position("EURUSD") is True
+
+
+def test_update_sl_updates_repo_and_calls_adapter(mocker):
+ repo = mocker.Mock()
+ repo.load_active.return_value = {
+ "1": {"ticket": "1", "sl": 95}
+ }
+
+ adapter = mocker.Mock()
+ svc = TradeStateService(repo=repo, adapter=adapter)
+
+ svc.update_sl(trade_id="1", new_sl=100)
+
+ adapter.modify_sl.assert_called_once()
+ repo.save_active.assert_called_once()
\ No newline at end of file
diff --git a/core/logging/__init__.py b/core/logging/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/core/logging/config.py b/core/logging/config.py
new file mode 100644
index 0000000..8bb23ca
--- /dev/null
+++ b/core/logging/config.py
@@ -0,0 +1,12 @@
+from dataclasses import dataclass
+from pathlib import Path
+
+
+@dataclass
+class LoggerConfig:
+ stdout: bool = True
+ file: bool = False
+ timing: bool = True
+ profiling: bool = False
+ log_dir: Path | None = None
+ level: int = 20
\ No newline at end of file
diff --git a/core/logging/null_logger.py b/core/logging/null_logger.py
new file mode 100644
index 0000000..52ddae6
--- /dev/null
+++ b/core/logging/null_logger.py
@@ -0,0 +1,23 @@
+from contextlib import contextmanager
+
+
+class NullLogger:
+ def debug(self, msg: str): pass
+ def info(self, msg: str): pass
+ def warning(self, msg: str): pass
+ def error(self, msg: str): pass
+ def log(self, msg: str): pass
+
+ def with_context(self, **ctx):
+ return self
+
+ @contextmanager
+ def time(self, label: str):
+ yield
+
+ @contextmanager
+ def section(self, name: str):
+ yield
+
+ def get_timings(self) -> dict[str, float]:
+ return {}
\ No newline at end of file
diff --git a/core/logging/prefix.py b/core/logging/prefix.py
new file mode 100644
index 0000000..73771af
--- /dev/null
+++ b/core/logging/prefix.py
@@ -0,0 +1,8 @@
+LOG_PREFIX = {
+ "DATA": "📈 DATA |",
+ "BT": "🧪 BACKTEST |",
+ "STRAT": "📐 STRATEGY |",
+ "REPORT": "📊 REPORT |",
+ "RUNNER": "🚀 RUN |",
+ "LIVE": "🟢 LIVE |",
+}
\ No newline at end of file
diff --git a/core/logging/profiling.py b/core/logging/profiling.py
new file mode 100644
index 0000000..db0c374
--- /dev/null
+++ b/core/logging/profiling.py
@@ -0,0 +1,18 @@
+from contextlib import contextmanager
+import cProfile
+import pstats
+
+
+@contextmanager
+def profiling(enabled: bool, path):
+ if not enabled:
+ yield
+ return
+
+ pr = cProfile.Profile()
+ pr.enable()
+ yield
+ pr.disable()
+ stats = pstats.Stats(pr)
+ stats.sort_stats("cumtime")
+ stats.dump_stats(path)
\ No newline at end of file
diff --git a/core/logging/run_logger.py b/core/logging/run_logger.py
new file mode 100644
index 0000000..945e8f9
--- /dev/null
+++ b/core/logging/run_logger.py
@@ -0,0 +1,152 @@
+
+import logging
+from contextlib import contextmanager
+from time import perf_counter
+
+
+from core.logging.config import LoggerConfig
+
+
+class RunLogger:
+ """
+ Unified logger for backtest and live.
+
+ Features:
+ - stdout / file logging
+ - log levels (debug/info/warning/error)
+ - structured context (symbol, trade_id, etc.)
+ - timing (step, time, section)
+ - optional profiling
+ """
+
+ def __init__(
+ self,
+ name: str,
+ cfg: LoggerConfig,
+ prefix: str = "",
+ *,
+ context: dict | None = None,
+ logger: logging.Logger | None = None,
+ ):
+ self.cfg = cfg
+ self.name = name
+ self.prefix = prefix
+ self.context = context or {}
+
+ self._t0 = perf_counter()
+ self._t_last = self._t0
+ self._timings: dict[str, float] = {}
+
+ if logger is not None:
+ # child logger with shared handlers
+ self.logger = logger
+ return
+
+ self.logger = logging.getLogger(name)
+ self.logger.setLevel(cfg.level)
+ self.logger.handlers.clear()
+ self.logger.propagate = False
+
+ if cfg.stdout:
+ h = logging.StreamHandler()
+ h.setFormatter(logging.Formatter("%(message)s"))
+ self.logger.addHandler(h)
+
+ if cfg.file:
+ if cfg.log_dir is None:
+ raise ValueError("LoggerConfig.file=True requires log_dir")
+
+ cfg.log_dir.mkdir(parents=True, exist_ok=True)
+ path = cfg.log_dir / f"{name}.log"
+
+ fh = logging.FileHandler(path, encoding="utf-8")
+ fh.setFormatter(
+ logging.Formatter("%(asctime)s | %(message)s")
+ )
+ self.logger.addHandler(fh)
+
+ # ==================================================
+ # Core emit
+ # ==================================================
+
+ def _emit(self, level: int, msg: str):
+ if self.prefix:
+ msg = f"{self.prefix} {msg}"
+
+ if self.context:
+ ctx = " ".join(f"{k}={v}" for k, v in self.context.items())
+ msg = f"{msg} | {ctx}"
+
+ self.logger.log(level, msg)
+
+ # ==================================================
+ # Public log API
+ # ==================================================
+
+ def debug(self, msg: str):
+ self._emit(logging.DEBUG, msg)
+
+ def info(self, msg: str):
+ self._emit(logging.INFO, msg)
+
+ def warning(self, msg: str):
+ self._emit(logging.WARNING, msg)
+
+ def error(self, msg: str):
+ self._emit(logging.ERROR, msg)
+
+ # Backward compatibility
+ def log(self, msg: str):
+ self.info(msg)
+
+ # ==================================================
+ # Context
+ # ==================================================
+
+ def with_context(self, **ctx) -> "RunLogger":
+ return RunLogger(
+ name=self.name,
+ cfg=self.cfg,
+ prefix=self.prefix,
+ context={**self.context, **ctx},
+ logger=self.logger,
+ )
+
+ # ==================================================
+ # Timing helpers
+ # ==================================================
+
+ def step(self, label: str):
+ if not self.cfg.timing:
+ return
+
+ now = perf_counter()
+ delta = now - self._t_last
+ total = now - self._t0
+
+ self.info(
+ f"⏱️ {label:<30} +{delta:6.2f}s | total {total:6.2f}s"
+ )
+ self._t_last = now
+
+ @contextmanager
+ def time(self, label: str):
+ if not self.cfg.timing:
+ yield
+ return
+
+ t0 = perf_counter()
+ yield
+ dt = perf_counter() - t0
+ self.info(f"⏱️ {label:<30} {dt:6.3f}s")
+
+ @contextmanager
+ def section(self, name: str):
+ t0 = perf_counter()
+ yield
+ dt = perf_counter() - t0
+ self._timings[name] = self._timings.get(name, 0.0) + dt
+ self.info(f"⏱️ section {name} {dt:6.3f}s")
+
+ def get_timings(self) -> dict[str, float]:
+ return dict(self._timings)
\ No newline at end of file
diff --git a/core/logging/telegram_handler.py b/core/logging/telegram_handler.py
new file mode 100644
index 0000000..ca34e02
--- /dev/null
+++ b/core/logging/telegram_handler.py
@@ -0,0 +1,3 @@
+class TelegramHandler(logging.Handler):
+ def emit(self, record):
+ send_to_telegram(self.format(record))
\ No newline at end of file
diff --git a/core/strategy/orchestration/strategy_execution.py b/core/strategy/orchestration/strategy_execution.py
index 835d39f..5c5e121 100644
--- a/core/strategy/orchestration/strategy_execution.py
+++ b/core/strategy/orchestration/strategy_execution.py
@@ -2,17 +2,13 @@
from core.strategy.orchestration.informatives import apply_informatives
-def execute_strategy(
- *,
- strategy,
- df: pd.DataFrame,
- data_by_tf: dict[str, pd.DataFrame],
-) -> pd.DataFrame:
- """
- Shared strategy execution pipeline.
- Used by backtest and live.
- """
+def execute_strategy(*, strategy, df, **kwargs):
+ if kwargs:
+ raise TypeError(
+ f"execute_strategy does not accept extra args: {list(kwargs)}"
+ )
+
df = apply_informatives(
df=df,
strategy=strategy,
@@ -26,3 +22,5 @@ def execute_strategy(
strategy.populate_exit_trend()
return strategy.df
+
+
diff --git a/core/strategy/tests/test_execute_strategy.py b/core/strategy/tests/test_execute_strategy.py
deleted file mode 100644
index e0ea116..0000000
--- a/core/strategy/tests/test_execute_strategy.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import pandas as pd
-from core.strategy.orchestration.strategy_execution import execute_strategy
-from Strategies.Samplestrategyreport import Samplestrategyreport
-from core.strategy.tests.helper import DummyProvider
-
-
-def test_execute_strategy_pipeline():
- df = pd.DataFrame({
- "time": pd.date_range(
- "2024-01-01", periods=100, freq="1min", tz="UTC"
- ),
- "open": 100.0,
- "high": 101.0,
- "low": 99.0,
- "close": 100.0,
- "volume": 1.0,
- })
-
- strat = Samplestrategyreport(
- df=df.copy(),
- symbol="XAUUSD",
- startup_candle_count=10,
- )
-
- out = execute_strategy(
- strategy=strat,
- df=df,
- data_by_tf={"M30": df},
-
- )
-
- assert "signal_entry" in out.columns
diff --git a/docs/architecture/pipelines.md b/docs/architecture/pipelines.md
index 0058cad..757bdc2 100644
--- a/docs/architecture/pipelines.md
+++ b/docs/architecture/pipelines.md
@@ -9,147 +9,194 @@ README intentionally keeps pipelines simplified.
```mermaid
flowchart LR
- A[run] --> D
-
- %% =====================
- %% DATA
- %% =====================
- subgraph D[Data Layer]
- D1[Create backend] --> D2[OHLCV Provider + Cache]
- D2 --> D3[Load OHLCV
per symbol]
- D3 --> D4[all_data]
- end
-
- %% =====================
- %% STRATEGY
- %% =====================
- D4 --> S
- subgraph S[Strategy Layer]
- S1{Single / Multi symbol?}
- S2[run_strategy_single]
- S3[Parallel execution
ProcessPoolExecutor]
- S4[signals_df]
- S5[return dataframe
with entry signals
and exit plan]
-
- S1 -->|single| S2 --> S4
- S1 -->|multi| S3 --> S4
- S4 --> S5
- end
-
- %% =====================
- %% RESEARCH PLOT MODE
- %% =====================
- S --> P{Plot charts
symbol only
research mode}
- P -->|yes| PL[Plot charts
PNG artifacts]
- PL --> END1[Exit]
-
- %% =====================
- %% BACKTEST
- %% =====================
- P -->|no| B
- subgraph B[Execution / Backtest]
- B1{BACKTEST_MODE}
- B2[Single window]
- B3[Split windows]
- B4[Backtester.run]
- B5[return dataframe
with trades]
-
- B1 -->|single| B2 --> B4
- B1 -->|split| B3 --> B4
- B4 --> B5
- end
-
- %% =====================
- %% REPORT
- %% =====================
- B --> R{Generate report?}
- R -->|no| END2[Exit: backtest only]
- R -->|yes| REP
-
- subgraph REP[Risk & Reporting]
- R1[RiskDataPreparer]
- R2[TradeContextEnricher]
- R3[ReportRunner]
- R4[Metrics, tables, charts]
- R5[render reports:
stdout tables or html dashboard]
-
- R1 --> R2 --> R3 --> R4 --> R5
- end
-
- R5 --> END3[Exit: full run]
+
+ RUN[BacktestRunner.run]
+
+ RUN --> DATA[Data Loading]
+ DATA --> STRAT[Strategy Execution]
+ STRAT --> BT[Backtesting Engine]
+ BT --> RES[Result Build]
+ RES --> REP[Reporting]
+
+ subgraph DATA[Data Layer]
+ DL1[BacktestStrategyDataProvider]
+ DL2[CsvMarketDataCache]
+ DL3[Backend Fetch]
+ DL1 --> DL2
+ DL1 --> DL3
+ end
+
+ subgraph STRAT[Strategy Layer]
+ S1[run_strategies]
+ S2[run_strategy_single / parallel]
+ S3[apply_informatives]
+ S4[populate_indicators]
+ S5[populate_entry_trend]
+ S6[populate_exit_trend]
+ S7[build_trade_plans]
+
+ S1 --> S2
+ S2 --> S3 --> S4 --> S5 --> S6 --> S7
+ end
+
+ subgraph BT[Execution Layer]
+ B1[Backtester]
+ B2[run_backtests_single / parallel]
+ B4[execution_loop]
+
+ B2 --> B1
+ B1 --> B4
+ end
+
+ subgraph REP[Reporting Layer]
+ R1[ReportRunner]
+        R2[Per-symbol reports]
+ R3[SummaryReportRunner]
+
+ R1 --> R2
+ R1 --> R3
+ end
```
---
## LiveTradingRunner — detailed flow
```mermaid
flowchart LR
- A[run] --> M
-
- %% =====================
- %% MT5 INIT
- %% =====================
- subgraph M[MT5 Init]
- M1[mt5.initialize] --> M2[symbol_select]
- M2 --> M3[account_info
log balance]
- end
-
- %% =====================
- %% WARMUP DATA
- %% =====================
- M --> W
- subgraph W[Warmup Data]
- W1[Resolve timeframe + lookback] --> W2[copy_rates_from_pos
bars]
- W2 --> W3[Build DataFrame
UTC time]
- W3 --> W4[df_execution]
- end
-
- %% =====================
- %% INFORMATIVE PROVIDER
- %% =====================
- W4 --> I
- subgraph I[Informative Provider]
- I1[load_strategy_class] --> I2[get_required_informatives]
- I2 --> I3[compute bars_per_tf
lookback + MIN_HTF_BARS]
- I3 --> I4[MT5Client
bars_per_tf]
- end
-
- %% =====================
- %% STRATEGY
- %% =====================
- I4 --> S
- W4 --> S
- subgraph S[Strategy Layer]
- S1[load_strategy
df_execution + symbol + startup_candle_count] --> S2[strategy]
- end
-
- %% =====================
- %% ENGINE BUILD
- %% =====================
- S2 --> E
- subgraph E[Execution / Live Engine]
- E1[MT5Adapter
dry_run flag]
- E2[TradeRepo]
- E3[PositionManager
repo + adapter]
- E4[LiveStrategyAdapter
wrap strategy]
- E5[market_data_provider
last closed candle]
- E6[LiveEngine
tick_interval_sec]
-
- E1 --> E3
- E2 --> E3
- E4 --> E6
- E5 --> E6
- E3 --> E6
- end
-
- %% =====================
- %% RUN LOOP (conceptual)
- %% =====================
- E --> R
- subgraph R[Run Loop]
- R1[engine.start] --> R2{Tick}
- R2 --> R3[market_data_provider]
- R3 --> R4[strategy_adapter
intents]
- R4 --> R5[position_manager
orders/positions]
- R5 --> R2
- end
+
+ %% =================================================
+ %% ENTRY POINT
+ %% =================================================
+ A[LiveTradingRunner.run]
+
+ %% =================================================
+ %% INIT SECTION
+ %% =================================================
+ subgraph INIT[Initialization]
+
+ B[MT5 initialize]
+ C[Symbol select]
+ D[Load Strategy Class]
+ E[Build Live Logger]
+
+ end
+
+ A --> INIT
+ INIT --> B --> C
+ INIT --> D
+ INIT --> E
+
+ %% =================================================
+ %% DATA LAYER
+ %% =================================================
+ subgraph DATA[Data Layer]
+
+ F[Strategy declares required timeframes]
+ G[MT5Client]
+ H[LiveStrategyDataProvider]
+ I[MT5MarketStateProvider]
+
+ end
+
+ D --> F
+ G --> H
+ F --> H
+ A --> H
+ A --> I
+
+ %% =================================================
+ %% STRATEGY LAYER
+ %% =================================================
+ subgraph STRATEGY[Strategy Layer]
+
+ J[LiveStrategyRunner]
+
+ subgraph DOMAIN[Strategy Domain *shared with backtest*]
+
+ K[apply_informatives]
+ L[populate_indicators]
+ M[populate_entry_trend]
+ N[populate_exit_trend]
+ O[build_trade_plans]
+
+ K --> L --> M --> N --> O
+ end
+
+ end
+
+ H --> J
+ D --> J
+ J --> K
+
+ %% =================================================
+ %% EXECUTION LAYER
+ %% =================================================
+ subgraph EXECUTION[Execution Layer]
+
+ P[LiveEngine]
+ Q[PositionManager]
+ R[TradeRepo / TradeStateService]
+ S[MT5Adapter]
+ T[MetaTrader5 API]
+
+ end
+
+ %% =================================================
+ %% RUNTIME FLOW
+ %% =================================================
+
+ %% Market state into engine
+ I --> P
+
+ %% Trade plans into engine
+ O --> P
+
+ %% Engine orchestration
+ P --> Q
+ Q --> R
+ Q --> S
+ S --> T
+
+ %% =================================================
+ %% FEEDBACK LOOPS
+ %% =================================================
+
+ T -->|broker-driven exits| Q
+ P -->|tick loop| P
+```
+
+## BacktestDataProvider — detailed flow
+```mermaid
+flowchart TB
+ %% ===== Nodes =====
+ U[Caller / Runner] --> P[BacktestStrategyDataProvider.get_ohlcv]
+
+ P -->|1 cache.coverage symbol, timeframe| C[CsvMarketDataCache]
+ C -->|returns: None or cov_start, cov_end| P
+
+ %% ===== Decision: cache coverage =====
+ P --> D1{Coverage exists?}
+
+ D1 -- No --> B1[backend.fetch_ohlcv full range]
+ B1 --> N1[_validate/_normalize provider-side]
+ N1 -->|cache.save... if fetched| C
+ N1 --> R1[return merged DF]
+
+ D1 -- Yes --> D2{Missing pre-range?}
+ D2 -- Yes --> B2[backend.fetch_ohlcv pre range]
+ B2 --> N2[_validate/_normalize]
+ N2 -->|cache.append pre if non-empty| C
+
+ D2 -- No --> D3{Missing post-range?}
+
+ D3 -- Yes --> B3[backend.fetch_ohlcv post range]
+ B3 --> N3[_validate/_normalize]
+ N3 -->|cache.append post if non-empty| C
+
+ %% ===== Load middle part =====
+ D3 -- No --> M[cache.load_range requested range]
+ N2 --> M
+ N3 --> M
+
+ M --> R2[merge pre + mid + post]
+ R2 --> R3[return merged DF]
```
\ No newline at end of file
diff --git a/docs/dev/how-to-run.md b/docs/dev/how-to-run.md
index 9377b86..d86e694 100644
--- a/docs/dev/how-to-run.md
+++ b/docs/dev/how-to-run.md
@@ -1,36 +1,61 @@
# Developer setup
-This document describes a minimal local setup for running the framework.
+This document describes a **minimal local setup** required to run the framework
+for backtesting, research and (optionally) live trading.
+
+The project is designed to run deterministically in offline mode on any OS,
+with live trading supported via MetaTrader 5 on Windows.
---
## Requirements
-- Python 3.10+ (recommended)
-- Windows + MetaTrader 5 (for live trading via MT5)
-- Any OS (for backtests, research, reporting)
+### Core
+- Python **3.10+** (recommended)
+- Git
+
+### Backtesting / Research
+- Any OS (Windows, macOS, Linux)
+
+### Live trading (optional)
+- Windows
+- MetaTrader 5 terminal
+- Broker account supported by MT5
---
-## Install
+## Environment setup
-Create and activate a virtual environment, then install dependencies:
+Create and activate a virtual environment:
```bash
python -m venv .venv
-# Windows:
-.venv\\Scripts\\activate
-# macOS/Linux:
-source .venv/bin/activate
-pip install -U pip
-pip install -r requirements.txt
+# Windows
+.venv\Scripts\activate
+
+# macOS / Linux
+source .venv/bin/activate
```
+
+## Configuration
+
+A default backtest configuration is provided with the repository.
+The configuration is **predefined and ready to use** for the first run:
+- includes a sample strategy
+- defines symbols, timeframes and timerange
+- points to default data and results directories
+
+No configuration changes are required to run the initial backtest.
+The file serves both as a runnable default and as a reference
+for further experimentation.
## Backtest run (minimal)
-Run a small backtest first to validate the environment.
-Use a small timerange and a single symbol to reduce debug surface.
+Start with a small backtest to validate the environment and data pipeline.
+
+Using a limited timerange and a single symbol is recommended for the first run,
+as historical data may need to be downloaded and cached.
+
```bash
python backtest_run.py
-```
-
+```
\ No newline at end of file
diff --git a/docs/dev/performance.md b/docs/dev/performance.md
new file mode 100644
index 0000000..f414c83
--- /dev/null
+++ b/docs/dev/performance.md
@@ -0,0 +1,183 @@
+# Performance & Profiling
+
+This document describes **observed performance characteristics**, profiling results
+and optimization strategy of the framework.
+
+Performance is treated as a **first-class architectural concern**, not as an afterthought.
+
+---
+
+## Test scenario
+
+Representative backtest run used for profiling:
+
+- Symbols: 2 (EURUSD, XAUUSD)
+- Base timeframe: M1
+- Informative timeframe: M30
+- Historical range: ~6 years
+- OHLCV rows:
+ - ~520k bars per symbol (M1)
+ - ~18k bars per symbol (M30)
+- Strategy:
+ - multi-timeframe
+ - feature-heavy (market structure, volatility, regimes)
+- Execution:
+ - full cost model
+ - parallel backtest execution
+
+---
+
+## End-to-end runtime breakdown
+
+Measured wall-clock times (single run):
+
+| Stage | Time |
+|---------------------------------|----------|
+| Data loading & caching | ~22s |
+| Strategy execution (features + signals + plans) | ~25s |
+| Backtest execution (parallel) | ~1.9s |
+| Reporting & persistence | <1s |
+| **Total runtime** | **~50s** |
+
+Timing is collected using structured instrumentation across pipeline stages.
+
+---
+
+## Profiling methodology
+
+Profiling was performed using Python `cProfile` with cumulative time analysis:
+
+- Full pipeline profiling (not isolated microbenchmarks)
+- Focus on **hot paths**, not function call counts
+- Profiling reflects **real workload**, not synthetic data
+
+---
+
+## Profiling summary (high level)
+
+Profiling confirms that execution time is dominated by **expected domains**:
+
+1. **Data access & preprocessing**
+ - Historical OHLCV loading
+ - Cache merging and validation
+
+2. **Feature engineering**
+ - Market structure analysis
+ - Dependency-aware feature computation
+
+3. **Vectorized pandas operations**
+ - Series access
+ - Column-wise transformations
+
+Orchestration, execution control flow and trade simulation introduce **negligible overhead**.
+
+This validates the architectural separation between:
+- deterministic computation
+- orchestration
+- execution side effects
+
+---
+
+## Hot paths (qualitative)
+
+Key contributors to cumulative runtime:
+
+- Historical data provider (`fetch`, `_get_ohlcv`)
+- Strategy `populate_indicators`
+- FeatureEngineering pipeline
+- MarketStructureEngine (pivots, price action, regimes)
+- Pandas Series access and combination
+
+Notably **absent** from hot paths:
+
+- Strategy orchestration logic
+- Execution engine control flow
+- Trade repository operations
+- Logging and instrumentation
+
+This indicates a **healthy performance profile**:
+time is spent where domain complexity lives.
+
+---
+
+## Feature engineering cost model
+
+Feature computation dominates strategy runtime by design.
+
+Key properties:
+
+- Deterministic execution order
+- Explicit feature dependencies
+- No implicit recomputation
+- Reusable context across features
+
+Market structure analysis is treated as a **feature extraction problem**,
+not as strategy logic, making it:
+
+- measurable
+- optimizable
+- replaceable
+
+---
+
+## Pandas overhead
+
+Profiling shows measurable overhead in:
+
+- `Series.__getitem__`
+- `Series.combine`
+- Generic dataframe access
+
+This overhead is:
+- explicit
+- measurable
+- localized to feature computation
+
+It does **not** leak into orchestration or execution layers.
+
+---
+
+## Optimization strategy
+
+Optimization is intentionally **incremental and targeted**.
+
+Planned directions:
+
+1. **Numba acceleration**
+ - Only for confirmed hot paths
+ - Focus on pivot detection and structural calculations
+ - Preserve deterministic semantics
+
+2. **Data representation**
+ - Evaluate narrower arrays for selected features
+ - Avoid premature rewrites
+
+3. **Batch execution**
+ - Reuse intermediate feature context where possible
+ - Avoid recomputation across symbols and timeframes
+
+Because hot paths are explicitly identified,
+optimization can be applied without architectural refactors.
+
+---
+
+## Design takeaway
+
+Profiling confirms that:
+
+- Architecture scales as intended
+- Performance cost is dominated by domain logic, not framework overhead
+- Feature engineering is the correct optimization target
+- Execution and orchestration layers remain lightweight
+
+This allows performance work to proceed **safely and predictably**,
+without compromising correctness or determinism.
+
+---
+
+## Notes
+
+Performance measurements are hardware-dependent and provided for
+architectural insight rather than absolute benchmarking.
+
+All measurements were obtained from real end-to-end runs.
\ No newline at end of file
diff --git a/docs/dev/profiling.md b/docs/dev/profiling.md
deleted file mode 100644
index fd343d9..0000000
--- a/docs/dev/profiling.md
+++ /dev/null
@@ -1,55 +0,0 @@
-# Profiling and performance workflow
-
-Performance is treated as an engineering concern:
-measure first, optimize second.
-
----
-
-## Principles
-
-- Prefer vectorized pandas over row-wise loops
-- Reduce dataframe copies and intermediate allocations
-- Cache intermediate results via DAG context
-- Use numba only for identified hot paths
-
----
-
-## What to measure
-
-- total pipeline runtime (end-to-end)
-- feature module runtime distribution
-- strategy runtime
-- execution engine runtime
-- peak memory usage (large datasets)
-
----
-
-## Practical workflow
-
-1) Run a small dataset to validate correctness.
-2) Run a medium dataset to locate hotspots.
-3) Profile the hottest functions/modules.
-4) Optimize one hotspot at a time.
-5) Re-run with the same input to confirm deterministic behavior.
-
----
-
-## Benchmark hygiene
-
-When measuring performance:
-- use the same dataset and timerange
-- disable background tasks
-- run multiple times and compare variance
-- track results in a simple log or markdown table
-
----
-
-## Stress tests
-
-Stress tests validate stability under load:
-- intentionally high signal density
-- large number of trades
-- long timeranges
-
-The goal is not “pretty backtest results”.
-The goal is stable performance and predictable execution behavior.
\ No newline at end of file
diff --git a/docs/index.html b/docs/index.html
index 0078aef..cce2241 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -13,7 +13,7 @@