import pandas as pd
import talib.abstract as ta

from FeatureEngineering.Indicators import indicators as qtpylib
from FeatureEngineering.MarketStructure.engine import MarketStructureEngine
from core.reporting.core.context import ContextSpec
from core.reporting.core.metrics import ExpectancyMetric, MaxDrawdownMetric
from core.strategy.base import BaseStrategy
from core.strategy.informatives import informative


class Samplestrategyreport(BaseStrategy):
    """
    RMA-ribbon pullback strategy with an M30 informative trend filter.

    Entries are pullbacks into the RMA-33 / RMA-144 ribbons in the direction
    of the prevailing trend, confirmed by a rejection close and gated by the
    M30 RMA-144 level. Exits are fixed SL/TP levels computed on the entry
    candle (see ``calculate_levels``); trailing behaviour is configured via
    ``strategy_config``.
    """

    # NOTE(review): in the original this dict was created as a local inside
    # __init__ and immediately discarded, so downstream consumers of
    # ``strategy_config`` (e.g. ExitExecution.from_config(plan.strategy_config))
    # could never see it. Exposed as a class attribute instead — confirm this
    # is where BaseStrategy expects it.
    strategy_config = {
        "USE_TP1": True,
        "USE_TP2": False,

        "USE_TRAILING": True,
        "TRAIL_FROM": "tp1",  # "entry" | "tp1"

        "TRAIL_MODE": "ribbon",
        "SWING_LOOKBACK": 5,

        "ALLOW_TP2_WITH_TRAILING": False,
    }

    def __init__(
        self,
        df,
        symbol,
        startup_candle_count,
    ):
        super().__init__(
            df=df,
            symbol=symbol,
            startup_candle_count=startup_candle_count,
        )

    @informative('M30')
    def populate_indicators_M30(self, df: pd.DataFrame):
        """
        Higher-timeframe (M30) indicators; the informative machinery merges
        these back into the base frame as ``*_M30`` columns.
        """
        df['rma_33_low'] = qtpylib.rma(df, df['low'], 33)
        df['rma_33_high'] = qtpylib.rma(df, df['high'], 33)

        df['rma_144_low'] = qtpylib.rma(df, df['low'], 144)
        df['rma_144_high'] = qtpylib.rma(df, df['high'], 144)

        df["atr"] = ta.ATR(df, 14)

        # --- market structure HTF
        df = MarketStructureEngine.apply(
            df,
            features=[
                "pivots",
                "price_action",
                "follow_through",
                "structural_vol",
                "trend_regime",
            ],
        )

        return df

    def populate_indicators(self) -> None:
        """
        Entry-timeframe indicators: ATR, RMA ribbons, rolling swing extremes
        and market-structure features. Mutates ``self.df`` in place.
        """
        df = self.df

        df['atr'] = ta.ATR(df, 14)

        df['rma_33_low'] = qtpylib.rma(df, df['low'], 33)
        df['rma_33_high'] = qtpylib.rma(df, df['high'], 33)

        df['rma_144_low'] = qtpylib.rma(df, df['low'], 144)
        # was qtpylib.rma(self.df, ...) — same frame, made consistent with
        # the surrounding calls
        df['rma_144_high'] = qtpylib.rma(df, df['high'], 144)

        # Ribbon-based stop references (ATR-based variants kept for reference).
        df['sl_long'] = df['rma_33_low']    # df['close'] - (1 * df['atr'])
        df['sl_short'] = df['rma_33_high']  # df['close'] + (1 * df['atr'])

        # Rolling swing extremes consumed by compute_sl().
        df['low_5'] = df['low'].rolling(5).min()
        df['high_5'] = df['high'].rolling(5).max()
        df['low_15'] = df['low'].rolling(15).min()
        df['high_15'] = df['high'].rolling(15).max()

        # Ribbon relations. (The original pre-initialised three of these
        # columns to None and immediately overwrote them; those dead
        # assignments were removed.)
        df['fast_rma_upper_than_slow'] = df['rma_33_low'] > df['rma_144_high']
        df['fast_rma_lower_than_slow'] = df['rma_33_high'] < df['rma_144_low']

        df['slow_rma_uprising'] = df['rma_144_low'] > df['rma_144_low'].shift(1)
        df['fast_rma_uprising'] = df['rma_33_low'] > df['rma_33_low'].shift(1)

        df = MarketStructureEngine.apply(
            df,
            features=[
                "pivots",
                "price_action",
                "follow_through",
                "structural_vol",
                "trend_regime",
            ],
        )

        self.df = df

    def populate_entry_trend(self) -> None:
        """
        Build entry signals.

        Four setups: pullback-and-rejection off the RMA-33 or RMA-144 ribbon,
        long and short, each gated by the higher-timeframe M30 RMA-144 level.
        When several masks hit the same candle, later assignments win
        (precedence: SHORT SETUP 2 > SHORT SETUP 1 > LONG SETUP 2 > LONG
        SETUP 1). SL/TP levels are attached per signal candle.

        NOTE(review): the original docstring described session/OB/FVG/Breaker
        logic that is not present in this method; rewritten to match the code.
        """
        df = self.df

        # --- logical masks ---
        long_mask_rma_33 = (
            (df['close'] > df['open']) &
            (df['fast_rma_upper_than_slow']) &          # HTF trend
            (df['low'] <= df['rma_33_high']) &          # pullback into ribbon
            (df['close'] > df['rma_33_high']) &         # rejection
            (df['slow_rma_uprising']) &                 # trend still rising
            (df['close'] > df['rma_144_low_M30'])
        )

        long_mask_rma_144 = (
            (df['close'] > df['open']) &
            (df['fast_rma_upper_than_slow']) &          # HTF trend
            (df['low'] <= df['rma_144_high']) &         # pullback into ribbon
            (df['close'] > df['rma_144_high']) &        # rejection
            (df['rma_144_low'] > df['rma_144_low'].shift(1))  # trend still rising
            & (df['close'] > df['rma_144_low_M30'])
        )

        short_mask = (
            (df['close'] < df['open']) &
            (df['rma_33_high'] < df['rma_144_low']) &   # trend down
            (df['high'] >= df['rma_33_low']) &          # pullback
            (df['close'] < df['rma_33_low']) &          # rejection
            (df['rma_33_high'] < df['rma_33_high'].shift(1))  # falling impulse
            & (df['close'] < df['rma_144_high_M30'])
        )

        short_mask_2 = (
            (df['close'] < df['open']) &
            (df['fast_rma_lower_than_slow']) &          # trend down
            (df['high'] >= df['rma_144_low']) &         # pullback
            (df['close'] < df['rma_144_low']) &         # rejection
            (df['rma_144_high'] < df['rma_144_high'].shift(1))  # falling impulse
            & (df['close'] < df['rma_144_high_M30'])
        )

        df["signal_entry"] = None

        idx = df.index[long_mask_rma_33]
        df.loc[idx, "signal_entry"] = [{"direction": "long", "tag": "LONG SETUP 1"}] * len(idx)

        idx = df.index[long_mask_rma_144]
        df.loc[idx, "signal_entry"] = [{"direction": "long", "tag": "LONG SETUP 2"}] * len(idx)

        idx = df.index[short_mask]
        df.loc[idx, "signal_entry"] = [{"direction": "short", "tag": "SHORT SETUP 1"}] * len(idx)

        idx = df.index[short_mask_2]
        df.loc[idx, "signal_entry"] = [{"direction": "short", "tag": "SHORT SETUP 2"}] * len(idx)

        # --- SL/TP levels, only for candles that carry a signal ---
        has_signals = df["signal_entry"].apply(bool)
        df.loc[has_signals, "levels"] = df.loc[has_signals].apply(
            lambda row: self.calculate_levels(row["signal_entry"], row),
            axis=1
        )

        self.df = df

    def populate_exit_trend(self):
        """No candle-based exits: exit management is level/trailing driven."""
        self.df["signal_exit"] = None
        self.df["custom_stop_loss"] = None

    def compute_sl(
        self,
        *,
        row,
        direction,
        min_atr_mult=0.5,
        min_pct=0.001,
    ):
        """
        Compute the stop-loss level for one entry candle.

        Takes the structural stop (recent swing extreme padded by 0.5 * ATR)
        unless it sits closer to price than the minimum allowed distance
        (max of ``min_atr_mult`` * ATR and ``min_pct`` of close), in which
        case the minimum-distance stop is used.

        Returns:
            (sl_level, sl_source) where sl_source is 'struct' | 'min'.
        """
        close = row["close"]
        atr = row["atr"]

        # =========================
        # STRUCTURAL SL
        # =========================

        if direction == "long":
            sl_structural = min(row["low_15"], row["low_5"]) - atr * 0.5
        else:
            sl_structural = max(row["high_15"], row["high_5"]) + atr * 0.5

        # =========================
        # MINIMUM SL DISTANCE
        # =========================

        min_sl_atr = atr * min_atr_mult
        min_sl_pct = close * min_pct
        min_distance = max(min_sl_atr, min_sl_pct)

        if direction == "long":
            sl_min = close - min_distance

            # structural stop is wider (further below) — use it
            if sl_structural < sl_min:
                return sl_structural, "struct"
            else:
                return sl_min, "min"

        else:
            sl_min = close + min_distance

            if sl_structural > sl_min:
                return sl_structural, "struct"
            else:
                return sl_min, "min"

    def calculate_levels(self, signals, row):
        """
        Build the SL/TP1/TP2 level dict for a signal candle.

        Args:
            signals: the signal_entry payload; anything that is not a dict
                     yields None (no levels).
            row: the entry candle (needs close, atr and the rolling extremes).

        Returns:
            dict with "SL", "TP1", "TP2" entries ({"level", "tag"}) or None.
        """
        if not isinstance(signals, dict):
            return None

        direction = signals["direction"]
        close = row["close"]

        sl, sl_source = self.compute_sl(
            row=row,
            direction=direction,
            min_atr_mult=1,
            min_pct=0.001
        )

        risk = abs(close - sl)

        # ============================
        # MICROSTRUCTURE-AWARE TP
        # ============================

        tp1_mult = 1
        tp2_mult = 2

        if direction == "long":
            tp1_level = close + risk * tp1_mult
            tp2_level = close + risk * tp2_mult
        else:
            tp1_level = close - risk * tp1_mult
            tp2_level = close - risk * tp2_mult

        return {
            "SL": {
                "level": sl,
                "tag": f"{sl_source}"
            },
            "TP1": {
                "level": tp1_level,
                "tag": f"RR 1:{tp1_mult} from {sl_source}"
            },
            "TP2": {
                "level": tp2_level,
                "tag": f"RR 1:{tp2_mult} from {sl_source}"
            },
        }

    def build_report_spec(self):
        """
        Extend the base report spec with expectancy/drawdown metrics and
        HTF/LTF trend-context columns captured on the entry candle.
        """
        return (
            super()
            .build_report_spec()
            .add_metric(ExpectancyMetric())
            .add_metric(MaxDrawdownMetric())
            .add_context(
                ContextSpec(
                    name="trend regime HTF",
                    column="trend_regime_M30",
                    source="entry_candle"
                )
            )
            .add_context(
                ContextSpec(
                    name="trend regime LTF",
                    column="trend_regime",
                    source="entry_candle"
                )
            )
            .add_context(
                ContextSpec(
                    name="trend strength HTF",
                    column="trend_strength_M30",
                    source="entry_candle"
                )
            )
            .add_context(
                ContextSpec(
                    name="trend strength LTF",
                    column="trend_strength",
                    source="entry_candle"
                )
            )
            .add_context(
                ContextSpec(
                    name="trend bias HTF",
                    column="trend_bias_M30",
                    source="entry_candle"
                )
            )
            .add_context(
                ContextSpec(
                    name="trend bias LTF",
                    column="trend_bias",
                    source="entry_candle"
                )
            )
        )
+DRY_RUN = True TICK_INTERVAL_SEC = 1.0 @@ -24,9 +24,9 @@ # STRATEGY # ================================================== -STRATEGY_CLASS = "Samplestrategy" +STRATEGY_CLASS = "Samplestrategyreport" -SYMBOLS = "EURUSD" +SYMBOLS = "BTCUSD" TIMEFRAME = "M1" diff --git a/config/logger_config.py b/config/logger_config.py deleted file mode 100644 index 21f142e..0000000 --- a/config/logger_config.py +++ /dev/null @@ -1,137 +0,0 @@ -from dataclasses import dataclass -from pathlib import Path -import logging -from contextlib import contextmanager -from time import perf_counter -import cProfile -import pstats - - -LOG_PREFIX = { - "DATA": "📈 DATA PREPARER |", - "BT": "🧪 BACKTEST |", - "STRAT": "📐 STRATEGY |", - "REPORT": "📊 REPORT |", - "RUNNER": "🚀 RUN |", -} - -@dataclass -class LoggerConfig: - stdout: bool = True - file: bool = False - timing: bool = True - profiling: bool = False - log_dir: Path | None = None - - -class RunLogger: - def __init__(self, name: str, cfg: LoggerConfig, prefix: str = ""): - self.cfg = cfg - self.name = name - self.prefix = prefix - - self._t0 = perf_counter() - self._t_last = self._t0 - - self.logger = logging.getLogger(name) - self.logger.setLevel(logging.INFO) - self.logger.handlers.clear() - self.logger.propagate = False - self._timings: dict[str, float] = {} - - if cfg.stdout: - h = logging.StreamHandler() - h.setFormatter(logging.Formatter("%(message)s")) - self.logger.addHandler(h) - - if cfg.file: - if cfg.log_dir is None: - raise ValueError("LoggerConfig.file=True requires log_dir") - - cfg.log_dir.mkdir(parents=True, exist_ok=True) - path = cfg.log_dir / f"{name}.log" - - fh = logging.FileHandler(path, encoding="utf-8") - fh.setFormatter( - logging.Formatter("%(asctime)s | %(message)s") - ) - self.logger.addHandler(fh) - - # -------------------- - # BASIC LOG - # -------------------- - def log(self, msg: str): - if self.prefix: - self.logger.info(f"{self.prefix} {msg}") - else: - self.logger.info(msg) - - # -------------------- - # 
TIMED STEP - # -------------------- - def step(self, label: str): - if not self.cfg.timing: - return - - now = perf_counter() - delta = now - self._t_last - total = now - self._t0 - - msg = f"⏱️ {label:<30} +{delta:6.2f}s | total {total:6.2f}s" - self.log(msg) - - self._t_last = now - - def get_timings(self) -> dict[str, float]: - """ - Return collected timing sections. - """ - return dict(self._timings) - - # -------------------- - # CONTEXT TIMER - # -------------------- - @contextmanager - def time(self, label: str): - if not self.cfg.timing: - yield - return - - t0 = perf_counter() - yield - dt = perf_counter() - t0 - self.logger.info(f"⏱️ {label:<30} {dt:6.3f}s") - - @contextmanager - def section(self, name: str): - t0 = perf_counter() - yield - dt = perf_counter() - t0 - self._timings[name] = self._timings.get(name, 0.0) + dt - - -@contextmanager -def profiling(enabled: bool, path): - if not enabled: - yield - return - - pr = cProfile.Profile() - pr.enable() - yield - pr.disable() - stats = pstats.Stats(pr) - stats.sort_stats("cumtime") - stats.dump_stats(path) - - -class NullLogger: - @contextmanager - def section(self, name: str): - yield - - def log(self, msg: str): - pass - - def get_timings(self) -> dict[str, float]: - return {} \ No newline at end of file diff --git a/core/backtesting/engine/worker.py b/core/backtesting/engine/worker.py index c692343..1cf7961 100644 --- a/core/backtesting/engine/worker.py +++ b/core/backtesting/engine/worker.py @@ -1,9 +1,10 @@ import pandas as pd -from config.logger_config import RunLogger, LoggerConfig from core.backtesting.engine.backtester import Backtester from core.backtesting.strategy_runner import strategy_orchestration +from core.logging.config import LoggerConfig +from core.logging.run_logger import RunLogger def run_backtest_worker( diff --git a/core/backtesting/runner.py b/core/backtesting/runner.py index 782d368..2928039 100644 --- a/core/backtesting/runner.py +++ b/core/backtesting/runner.py @@ -2,11 +2,11 
@@ from concurrent.futures import ProcessPoolExecutor, as_completed from datetime import datetime from pathlib import Path +from time import perf_counter from uuid import uuid4 import pandas as pd -from config.logger_config import RunLogger, profiling from config.report_config import ReportConfig, StdoutMode from core.backtesting.backend_factory import create_backtest_backend from core.backtesting.engine.backtester import Backtester @@ -15,8 +15,10 @@ from core.backtesting.results_logic.result import BacktestResult from core.backtesting.results_logic.store import ResultStore from core.backtesting.strategy_runner import strategy_orchestration -from core.data_provider import DefaultOhlcvDataProvider, CsvMarketDataCache +from core.data_provider import BacktestStrategyDataProvider, CsvMarketDataCache from core.live_trading.strategy_loader import load_strategy_class +from core.logging.profiling import profiling +from core.logging.run_logger import RunLogger from core.reporting.runner import ReportRunner from core.reporting.summary_runner import SummaryReportRunner @@ -87,31 +89,26 @@ def load_data(self) -> dict[str, dict[str, pd.DataFrame]]: start = pd.Timestamp(self.cfg.TIMERANGE["start"], tz="UTC") end = pd.Timestamp(self.cfg.TIMERANGE["end"], tz="UTC") - self.provider = DefaultOhlcvDataProvider( + self.provider = BacktestStrategyDataProvider( backend=backend, cache=CsvMarketDataCache(self.cfg.MARKET_DATA_PATH), backtest_start=start, backtest_end=end, + required_timeframes=all_tfs, + startup_candle_count=self.cfg.STARTUP_CANDLE_COUNT, logger=self.log_data, ) - all_data = {} + all_data: dict[str, dict[str, pd.DataFrame]] = {} with self.log_data.time("load_all"): for symbol in self.cfg.SYMBOLS: - per_symbol = {} - for tf in all_tfs: - per_symbol[tf] = self.provider.get_ohlcv( - symbol=symbol, - timeframe=tf, - start=start, - end=end, - ) - all_data[symbol] = per_symbol + all_data[symbol] = self.provider.fetch(symbol) self.log_data.log( f"summary | 
symbols={len(all_data)} timeframes={len(all_tfs)}" ) + return all_data # ================================================== # 2️⃣ STRATEGY EXECUTION @@ -320,6 +317,9 @@ def _build_result(self) -> BacktestResult: # ================================================== def run(self): + + t0 = perf_counter() + self.run_path = Path( f"results/run_{datetime.now().strftime('%Y%m%d_%H%M%S')}" ) @@ -366,4 +366,7 @@ def run(self): run_path=run_path, ).run() + total = perf_counter() - t0 + self.log_run.log(f"TOTAL {total:,.3f}s") + self.log_run.log("finished") diff --git a/core/backtesting/strategy_runner.py b/core/backtesting/strategy_runner.py index d2235ec..475a790 100644 --- a/core/backtesting/strategy_runner.py +++ b/core/backtesting/strategy_runner.py @@ -3,7 +3,8 @@ import pandas as pd -from config.logger_config import RunLogger, NullLogger +from core.logging.null_logger import NullLogger +from core.logging.run_logger import RunLogger from core.strategy.orchestration.informatives import apply_informatives from core.strategy.plan_builder import PlanBuildContext from core.utils.timeframe import tf_to_minutes diff --git a/core/data_provider/__init__.py b/core/data_provider/__init__.py index ffb92ca..41ed177 100644 --- a/core/data_provider/__init__.py +++ b/core/data_provider/__init__.py @@ -1,13 +1,13 @@ from core.data_provider.cache.csv_cache import CsvMarketDataCache from core.data_provider.contracts import MarketDataBackend -from core.data_provider.providers.default_provider import DefaultOhlcvDataProvider +from core.data_provider.providers.default_provider import BacktestStrategyDataProvider from core.data_provider.errors import DataNotAvailable __all__ = [ "MarketDataBackend", "CsvMarketDataCache", - "DefaultOhlcvDataProvider", + "BacktestStrategyDataProvider", ] diff --git a/core/data_provider/clients/mt5_client.py b/core/data_provider/clients/mt5_client.py index ee8435b..f04290d 100644 --- a/core/data_provider/clients/mt5_client.py +++ 
b/core/data_provider/clients/mt5_client.py @@ -1,19 +1,14 @@ import pandas as pd import MetaTrader5 as mt5 -from core.data_provider.contracts import MarketDataProvider +from core.data_provider.contracts import LiveMarketDataClient from core.utils.lookback import LOOKBACK_CONFIG from core.utils.timeframe import MT5_TIMEFRAME_MAP -class MT5Client(MarketDataProvider): +class MT5Client(LiveMarketDataClient): - def __init__( - self, - *, - bars_per_tf: dict[str, int]): - self.bars_per_tf = bars_per_tf @staticmethod def _fetch_ohlcv( diff --git a/core/data_provider/contracts.py b/core/data_provider/contracts.py index e93642c..5765225 100644 --- a/core/data_provider/contracts.py +++ b/core/data_provider/contracts.py @@ -15,8 +15,25 @@ def fetch_ohlcv( ) -> pd.DataFrame: ... +class CsvMarketDataCache(Protocol): + def coverage(self, *, symbol: str, timeframe: str): ... + def load_range(self, *, symbol: str, timeframe: str, start: pd.Timestamp, end: pd.Timestamp) -> pd.DataFrame: ... + def save(self, *, symbol: str, timeframe: str, df: pd.DataFrame) -> None: ... + def append(self, *, symbol: str, timeframe: str, df: pd.DataFrame) -> None: ... + + +class StrategyDataProvider(Protocol): + """ + Strategy-level data contract. + """ + def fetch(self, symbol: str) -> dict[str, pd.DataFrame]: + ... + -class MarketDataProvider(ABC): +class LiveMarketDataClient(ABC): + """ + Low-level live market data client. + """ @abstractmethod def get_ohlcv( @@ -26,15 +43,4 @@ def get_ohlcv( timeframe: str, bars: int, ) -> pd.DataFrame: - pass - - -class CsvMarketDataCache(Protocol): - def coverage(self, *, symbol: str, timeframe: str): ... - def load_range(self, *, symbol: str, timeframe: str, start: pd.Timestamp, end: pd.Timestamp) -> pd.DataFrame: ... - def save(self, *, symbol: str, timeframe: str, df: pd.DataFrame) -> None: ... - def append(self, *, symbol: str, timeframe: str, df: pd.DataFrame) -> None: ... 
- - -class OhlcvProvider(Protocol): - def get_ohlcv(self, *, symbol: str, timeframe: str, start: pd.Timestamp, end: pd.Timestamp,) -> pd.DataFrame: ... \ No newline at end of file + ... \ No newline at end of file diff --git a/core/data_provider/providers/backtest_provider.py b/core/data_provider/providers/backtest_provider.py new file mode 100644 index 0000000..e69de29 diff --git a/core/data_provider/providers/default_provider.py b/core/data_provider/providers/default_provider.py index 135bbe7..f17d1cc 100644 --- a/core/data_provider/providers/default_provider.py +++ b/core/data_provider/providers/default_provider.py @@ -2,12 +2,11 @@ import pandas as pd -from config.logger_config import RunLogger from core.data_provider.ohlcv_schema import sort_and_deduplicate, ensure_utc_time from core.utils.timeframe import timeframe_to_pandas_freq -class DefaultOhlcvDataProvider: +class BacktestStrategyDataProvider: """ BACKTEST OHLCV provider. @@ -18,18 +17,22 @@ class DefaultOhlcvDataProvider: """ def __init__( - self, - *, - backend, - cache, - backtest_start: pd.Timestamp, - backtest_end: pd.Timestamp, - logger + self, + *, + backend, + cache, + backtest_start: pd.Timestamp, + backtest_end: pd.Timestamp, + required_timeframes: list[str], + startup_candle_count: int, + logger, ): self.backend = backend self.cache = cache self.backtest_start = self._to_utc(backtest_start) self.backtest_end = self._to_utc(backtest_end) + self.required_timeframes = required_timeframes + self.startup_candle_count = startup_candle_count self.logger = logger # ------------------------------------------------- @@ -72,7 +75,7 @@ def shift_time_by_candles( # Main API # ------------------------------------------------- - def get_ohlcv( + def _get_ohlcv( self, *, symbol: str, @@ -185,7 +188,7 @@ def get_ohlcv( # Informative data # ------------------------------------------------- - def get_informative_df( + def _get_informative_df( self, *, symbol: str, @@ -207,7 +210,7 @@ def get_informative_df( 
candles=startup_candle_count, ) - df = self.get_ohlcv( + df = self._get_ohlcv( symbol=symbol, timeframe=timeframe, start=extended_start, @@ -215,3 +218,26 @@ def get_informative_df( ) return df.copy() + + def fetch(self, symbol: str) -> dict[str, pd.DataFrame]: + """ + Strategy-level data fetch for BACKTEST. + + Returns: + data_by_tf: dict[timeframe, DataFrame] + """ + + data: dict[str, pd.DataFrame] = {} + + # strategia deklaruje wymagane TF + # (dokładnie jak w backteście dziś) + # np. runner już to wie + for tf in self.required_timeframes: + df = self._get_informative_df( + symbol=symbol, + timeframe=tf, + startup_candle_count=self.startup_candle_count, + ) + data[tf] = df + + return data diff --git a/core/data_provider/providers/live_provider.py b/core/data_provider/providers/live_provider.py new file mode 100644 index 0000000..bf83b27 --- /dev/null +++ b/core/data_provider/providers/live_provider.py @@ -0,0 +1,33 @@ +import pandas as pd + +from core.data_provider.contracts import LiveMarketDataClient + + +class LiveStrategyDataProvider: + """ + Strategy-level provider for LIVE trading. + + Adapts any LiveMarketDataClient to StrategyDataProvider. 
+ """ + + def __init__( + self, + *, + client: LiveMarketDataClient, + bars_per_tf: dict[str, int], + ): + self.client = client + self.bars_per_tf = bars_per_tf + + def fetch(self, symbol: str) -> dict[str, pd.DataFrame]: + data: dict[str, pd.DataFrame] = {} + + for tf, bars in self.bars_per_tf.items(): + df = self.client.get_ohlcv( + symbol=symbol, + timeframe=tf, + bars=bars, + ) + data[tf] = df + + return data \ No newline at end of file diff --git a/core/data_provider/tests/test_default_provider.py b/core/data_provider/tests/test_default_provider.py index 3cd9d05..7e48b23 100644 --- a/core/data_provider/tests/test_default_provider.py +++ b/core/data_provider/tests/test_default_provider.py @@ -1,9 +1,10 @@ import pandas as pd -from config.logger_config import NullLogger from core.data_provider import CsvMarketDataCache -from core.data_provider.providers.default_provider import DefaultOhlcvDataProvider +from core.data_provider.providers.default_provider import BacktestStrategyDataProvider +from core.logging.null_logger import NullLogger + def test_no_cache_fetches_and_saves(tmp_path, utc): @@ -19,21 +20,24 @@ def test_no_cache_fetches_and_saves(tmp_path, utc): ("EURUSD", "M1", start, end): df_full, }) - p = DefaultOhlcvDataProvider( + p = BacktestStrategyDataProvider( backend=backend, cache=cache, backtest_start=start, backtest_end=end, + required_timeframes=["M1"], + startup_candle_count=0, logger=NullLogger(), ) - out = p.get_ohlcv(symbol="EURUSD", timeframe="M1", start=start, end=end) + out = p.fetch(symbol="EURUSD") + df = out["M1"] # assert assert len(backend.calls) == 1 - assert out["time"].is_monotonic_increasing - assert out["time"].dt.tz is not None - assert out["close"].iloc[-1] == 999 + assert df["time"].is_monotonic_increasing + assert df["time"].dt.tz is not None + assert df["close"].iloc[-1] == 999 def test_missing_before_fetches_pre_and_appends(tmp_path, utc): @@ -49,24 +53,29 @@ def test_missing_before_fetches_pre_and_appends(tmp_path, utc): end = 
utc("2022-01-01 00:10:00") from core.data_provider.tests.conftest import FakeBackend, make_ohlcv - df_pre = make_ohlcv("2022-01-01 00:00:00", periods=5, freq="1min") # 00:00..00:04 + df_pre = make_ohlcv("2022-01-01 00:00:00", periods=5, freq="1min") backend = FakeBackend({ ("EURUSD", "M1", utc("2022-01-01 00:00:00"), utc("2022-01-01 00:05:00")): df_pre }) - from core.data_provider.providers.default_provider import DefaultOhlcvDataProvider - p = DefaultOhlcvDataProvider( - backend=backend, cache=cache, - backtest_start=start, backtest_end=end, + from core.data_provider.providers.default_provider import BacktestStrategyDataProvider + p = BacktestStrategyDataProvider( + backend=backend, + cache=cache, + backtest_start=start, + backtest_end=end, + required_timeframes=["M1"], + startup_candle_count=0, logger=NullLogger(), ) - out = p.get_ohlcv(symbol="EURUSD", timeframe="M1", start=start, end=end) + out = p.fetch(symbol="EURUSD") + df = out["M1"] assert backend.calls == [("EURUSD","M1", utc("2022-01-01 00:00:00"), utc("2022-01-01 00:05:00"))] - assert out["time"].min() == utc("2022-01-01 00:00:00") - assert out["time"].max() == utc("2022-01-01 00:10:00") + assert df["time"].min() == utc("2022-01-01 00:00:00") + assert df["time"].max() == utc("2022-01-01 00:10:00") def test_missing_after_fetches_post_and_appends(tmp_path, utc): @@ -89,14 +98,19 @@ def test_missing_after_fetches_post_and_appends(tmp_path, utc): ("EURUSD", "M1", utc("2022-01-01 00:05:00"), utc("2022-01-01 00:10:00")): df_post }) - from core.data_provider.providers.default_provider import DefaultOhlcvDataProvider - p = DefaultOhlcvDataProvider( - backend=backend, cache=cache, - backtest_start=start, backtest_end=end, + from core.data_provider.providers.default_provider import BacktestStrategyDataProvider + p = BacktestStrategyDataProvider( + backend=backend, + cache=cache, + backtest_start=start, + backtest_end=end, + required_timeframes=["M1"], + startup_candle_count=0, logger=NullLogger(), ) - out = 
p.get_ohlcv(symbol="EURUSD", timeframe="M1", start=start, end=end) + out = p.fetch(symbol="EURUSD") + df = out["M1"] assert backend.calls == [("EURUSD","M1", utc("2022-01-01 00:05:00"), utc("2022-01-01 00:10:00"))] - assert out["time"].max() == utc("2022-01-01 00:10:00") \ No newline at end of file + assert df["time"].max() == utc("2022-01-01 00:10:00") \ No newline at end of file diff --git a/core/live_trading/engine.py b/core/live_trading/engine.py index 61f8753..bc4f491 100644 --- a/core/live_trading/engine.py +++ b/core/live_trading/engine.py @@ -3,47 +3,33 @@ from datetime import datetime import time -from core.live_trading.strategy_adapter import LiveStrategyAdapter +from core.live_trading.strategy_runner import LiveStrategyRunner class LiveEngine: """ Live trading engine. - Tick loop: - - always run exit logic - - on new candle: update strategy and maybe execute entry """ def __init__( self, *, position_manager, - market_data_provider, - strategy_adapter: LiveStrategyAdapter, + market_state_provider, + strategy_runner, tick_interval_sec: float = 1.0, ): self.position_manager = position_manager - self.market_data_provider = market_data_provider - self.strategy_adapter = strategy_adapter + self.market_state_provider = market_state_provider + self.strategy_runner = strategy_runner self.tick_interval_sec = tick_interval_sec self._running = False - self._last_candle_time = None - - # keep last candle-derived state for tick-based management - self._last_strategy_row = None + self._last_strategy_state = None def start(self): self._running = True print("🟢 LiveEngine started") - self._run_loop() - - def stop(self): - self._running = False - print("🔴 LiveEngine stopped") - - def _run_loop(self): - last_heartbeat = time.time() while self._running: try: @@ -51,51 +37,30 @@ def _run_loop(self): except Exception as e: print(f"❌ Engine error: {type(e).__name__}: {e}") - if time.time() - last_heartbeat > 30: - print("💓 Engine alive") - last_heartbeat = time.time() - 
time.sleep(self.tick_interval_sec) + def stop(self): + self._running = False + print("🔴 LiveEngine stopped") + def _tick(self): - market_state = self.market_data_provider() + market_state = self.market_state_provider.poll() if market_state is None: return - market_state.setdefault("time", datetime.utcnow()) - - # ----------------------------------------- - # Inject last strategy management signals - # ----------------------------------------- - if self._last_strategy_row is not None: - # these keys are optional in your DF - se = self._last_strategy_row.get("signal_exit") - csl = self._last_strategy_row.get("custom_stop_loss") - - if isinstance(se, dict): - market_state["signal_exit"] = se - if isinstance(csl, dict): - market_state["custom_stop_loss"] = csl - - # ----------------------------------------- + # --------------------------- # EXIT LOGIC (tick-based) - # ----------------------------------------- + # --------------------------- self.position_manager.on_tick(market_state=market_state) - # ----------------------------------------- + # --------------------------- # ENTRY LOGIC (candle-based) - # ----------------------------------------- - candle_time = market_state.get("candle_time") - if candle_time is None: - return - - if self._last_candle_time == candle_time: + # --------------------------- + if market_state.get("candle_time") is None: return - self._last_candle_time = candle_time - - result = self.strategy_adapter.on_new_candle() - self._last_strategy_row = result.last_row + result = self.strategy_runner.run() + self._last_strategy_state = result.last_row if result.plan is not None: self.position_manager.on_trade_plan( diff --git a/core/live_trading/execution/mt5_adapter.py b/core/live_trading/execution/mt5_adapter.py index 0c41b13..8876d3d 100644 --- a/core/live_trading/execution/mt5_adapter.py +++ b/core/live_trading/execution/mt5_adapter.py @@ -10,24 +10,22 @@ class MT5Adapter: """ def __init__( - self, - *, - login: int | None = None, - password: str 
| None = None, - server: str | None = None, - dry_run: bool = False, + self, + *, + dry_run: bool = False, + log, ): self.dry_run = dry_run + self.log = log if self.dry_run: - print("⚠ MT5Adapter running in DRY-RUN mode") + self.log.warning("MT5Adapter running in DRY-RUN mode") return if not mt5.terminal_info(): raise RuntimeError("MT5 terminal not initialized") - print("🟢 MT5 initialized") - + self.log.info("MT5 initialized") # ================================================== # Execution API # ================================================== @@ -38,25 +36,34 @@ def open_position( symbol: str, direction: str, volume: float, - price: float, sl: float, tp: float | None = None, ) -> Dict[str, Any]: if self.dry_run: - print( + self.log.debug( f"[DRY-RUN] OPEN {symbol} {direction} " - f"vol={volume} price={price} sl={sl} tp={tp}" + f"vol={volume} sl={sl} tp={tp}" ) - return {"ticket": f"MOCK_{symbol}", "price": price} + return {"ticket": f"MOCK_{symbol}", "price": None} # -------------------------------------------------- - # SYMBOL + TICK + # SYMBOL INFO / MODE # -------------------------------------------------- symbol_info = mt5.symbol_info(symbol) if symbol_info is None: raise RuntimeError(f"Symbol not found: {symbol}") + if symbol_info.trade_mode == mt5.SYMBOL_TRADE_MODE_CLOSEONLY: + raise RuntimeError(f"Symbol {symbol} is CLOSE-ONLY") + + # -------------------------------------------------- + # NETTING GUARD + # -------------------------------------------------- + positions = mt5.positions_get(symbol=symbol) + if positions: + raise RuntimeError(f"Position already open for {symbol}") + if not symbol_info.visible: mt5.symbol_select(symbol, True) @@ -67,33 +74,32 @@ def open_position( market_price = tick.ask if direction == "long" else tick.bid # -------------------------------------------------- - # VALIDATE SL / TP (ABSOLUTELY REQUIRED) + # SL / TP VALIDATION # -------------------------------------------------- stops_level = symbol_info.trade_stops_level * 
symbol_info.point if direction == "long": if sl >= market_price: - raise RuntimeError(f"Invalid SL for long: sl={sl}, price={market_price}") + raise RuntimeError("Invalid SL for long") if tp is not None and tp <= market_price: - raise RuntimeError(f"Invalid TP for long: tp={tp}, price={market_price}") + raise RuntimeError("Invalid TP for long") if (market_price - sl) < stops_level: - raise RuntimeError(f"SL too close: {market_price - sl} < {stops_level}") - if tp is not None and (tp - market_price) < stops_level: - raise RuntimeError(f"TP too close: {tp - market_price} < {stops_level}") + raise RuntimeError("SL too close") + if tp and (tp - market_price) < stops_level: + raise RuntimeError("TP too close") order_type = mt5.ORDER_TYPE_BUY - - else: # short + else: if sl <= market_price: - raise RuntimeError(f"Invalid SL for short: sl={sl}, price={market_price}") + raise RuntimeError("Invalid SL for short") if tp is not None and tp >= market_price: - raise RuntimeError(f"Invalid TP for short: tp={tp}, price={market_price}") + raise RuntimeError("Invalid TP for short") if (sl - market_price) < stops_level: - raise RuntimeError(f"SL too close: {sl - market_price} < {stops_level}") - if tp is not None and (market_price - tp) < stops_level: - raise RuntimeError(f"TP too close: {market_price - tp} < {stops_level}") + raise RuntimeError("SL too close") + if tp and (market_price - tp) < stops_level: + raise RuntimeError("TP too close") order_type = mt5.ORDER_TYPE_SELL @@ -133,7 +139,7 @@ def close_position( ) -> None: if self.dry_run: - print(f"[DRY-RUN] CLOSE ticket={ticket} price={price}") + self.log.debug(f"[DRY-RUN] CLOSE ticket={ticket} price={price}") return positions = mt5.positions_get(ticket=int(ticket)) @@ -168,7 +174,7 @@ def close_position( def close_partial(self, *, ticket: str, volume: float, price: float): if self.dry_run: - print(f"[DRY-RUN] PARTIAL CLOSE ticket={ticket} vol={volume} price={price}") + self.log.debug(f"[DRY-RUN] PARTIAL CLOSE ticket={ticket} 
vol={volume} price={price}") return request = { @@ -187,7 +193,7 @@ def close_partial(self, *, ticket: str, volume: float, price: float): def modify_sl(self, *, ticket: str, new_sl: float): if self.dry_run: - print(f"[DRY-RUN] MODIFY SL ticket={ticket} sl={new_sl}") + self.log.debug(f"[DRY-RUN] MODIFY SL ticket={ticket} sl={new_sl}") return request = { @@ -206,7 +212,7 @@ def init_mt5(self): raise RuntimeError(f"MT5 init failed: {mt5.last_error()}") info = mt5.account_info() - print( + self.log.debug( "🟢 MT5 initialized | " f"Account={info.login} " f"Balance={info.balance} " @@ -216,4 +222,4 @@ def init_mt5(self): def shutdown(self): if not self.dry_run: mt5.shutdown() - print("🔴 MT5 shutdown") + self.log.debug("🔴 MT5 shutdown") diff --git a/core/live_trading/execution/position_manager.py b/core/live_trading/execution/position_manager.py index 80f84f7..5230419 100644 --- a/core/live_trading/execution/position_manager.py +++ b/core/live_trading/execution/position_manager.py @@ -22,7 +22,7 @@ class PositionManager: Responsibilities: - consume TradePlan and execute entry - - monitor active positions and perform exits (engine-managed) + - monitor active positions and perform exits - keep repo in sync with broker """ @@ -31,6 +31,13 @@ def __init__(self, repo: TradeRepo, adapter: MT5Adapter): self.adapter = adapter self.state = TradeStateService(repo=repo, adapter=adapter) + # ================================================== + # Helpers + # ================================================== + + def _is_dry_run(self) -> bool: + return bool(getattr(self.adapter, "dry_run", False)) + # ================================================== # ENTRY # ================================================== @@ -43,7 +50,11 @@ def on_trade_plan(self, *, plan: TradePlan, market_state: dict) -> None: execution = ExitExecution.from_config(plan.strategy_config) volume = self._compute_volume(plan=plan) - params = trade_plan_to_mt5_order(plan=plan, volume=volume, execution=execution) + 
params = trade_plan_to_mt5_order( + plan=plan, + volume=volume, + execution=execution, + ) print( f"📦 EXECUTING TRADE PLAN | {plan.symbol} {plan.direction} " @@ -54,13 +65,15 @@ def on_trade_plan(self, *, plan: TradePlan, market_state: dict) -> None: symbol=params.symbol, direction=params.direction, volume=params.volume, - price=params.price, sl=params.sl, tp=params.tp, ) - # Store execution policy with the trade so tick handler uses the same rules. - # If your repo does not support extra payload, persist it inside the stored trade dict. + # ENTRY may be skipped (CLOSE-ONLY, DRY_RUN guard etc.) + if result is None: + print(f"⚠ ENTRY skipped for {plan.symbol}") + return + self.state.record_entry( plan=plan, exec_result=result, @@ -77,8 +90,7 @@ def _compute_volume(self, *, plan: TradePlan) -> float: sl=plan.exit_plan.sl, max_risk=max_risk, ) - norm = Mt5RiskParams.normalize_volume(plan.symbol, raw_volume) - return norm + return Mt5RiskParams.normalize_volume(plan.symbol, raw_volume) # ================================================== # TICK LOOP @@ -98,26 +110,46 @@ def on_tick(self, *, market_state: dict) -> None: execution = self._get_execution(trade) - if self._handle_managed_exit_signal(trade_id=trade_id, trade=trade, market_state=market_state, price=price, now=now): + if self._handle_managed_exit_signal( + trade_id=trade_id, + trade=trade, + market_state=market_state, + price=price, + now=now, + ): continue - # TP1 / BE (only if TP1 is engine-managed; otherwise repo should mark it from broker sync or you ignore TP1) - if self._handle_tp1_and_be(trade_id=trade_id, trade=trade, execution=execution, price=price, now=now): + if self._handle_tp1_and_be( + trade_id=trade_id, + trade=trade, + execution=execution, + price=price, + now=now, + ): continue - self._handle_trailing_sl(trade_id=trade_id, trade=trade, market_state=market_state) + self._handle_trailing_sl( + trade_id=trade_id, + trade=trade, + market_state=market_state, + ) - if 
self._handle_engine_exit(trade_id=trade_id, trade=trade, execution=execution, price=price, now=now): - continue + self._handle_engine_exit( + trade_id=trade_id, + trade=trade, + execution=execution, + price=price, + now=now, + ) # ================================================== - # Tick handlers (small, testable pieces) + # Tick handlers # ================================================== def _handle_broker_sync(self, *, trade_id: str, trade: dict, now: datetime) -> bool: - """ - If broker closed the position (e.g. broker TP2), sync repo and stop processing. - """ + if self._is_dry_run(): + return False + positions = mt5.positions_get(ticket=int(trade["ticket"])) if positions: return False @@ -150,7 +182,13 @@ def _handle_managed_exit_signal( and signal_exit.get("direction") == "close" ): print(f"🚪 MANAGED EXIT for {trade_id}") - self.adapter.close_position(ticket=trade["ticket"], price=price) + + if not self._is_dry_run(): + self.adapter.close_position( + ticket=trade["ticket"], + price=price, + ) + self.state.record_exit( trade_id=trade_id, price=price, @@ -171,36 +209,36 @@ def _handle_tp1_and_be( price: float, now: datetime, ) -> bool: - """ - TP1 partial close and optional SL->BE. - - Rules: - - If TP1 is DISABLED -> do nothing. - - If TP1 is BROKER -> do not do partial close (broker closes full; partial isn't supported that way). - (You can still do BE_ON_TP1 if you detect price reached tp1; that's optional and controlled below.) - - If TP1 is ENGINE -> run partial close logic. 
- """ if trade.get("tp1_executed"): return False if execution.tp1 == "DISABLED": return False - # Detect TP1 reached (price-based) if not LiveExitRules.check_tp1_hit(trade=trade, price=price): return False if execution.tp1 == "ENGINE": - self._execute_tp1_partial(trade_id=trade_id, trade=trade, price=price, now=now) + self._execute_tp1_partial( + trade_id=trade_id, + trade=trade, + price=price, + now=now, + ) - # Move SL -> BE when TP1 is reached (applies for ENGINE or BROKER TP1 if you want) if execution.be_on_tp1: self._try_move_sl_to_be(trade_id=trade_id, trade=trade) - # If we just executed TP1 partial, we end tick processing for that trade return execution.tp1 == "ENGINE" - def _execute_tp1_partial(self, *, trade_id: str, trade: dict, price: float, now: datetime) -> None: + def _execute_tp1_partial( + self, + *, + trade_id: str, + trade: dict, + price: float, + now: datetime, + ) -> None: total_vol = float(trade["volume"]) cfg = trade.get("strategy_config", {}) close_ratio = float(cfg.get("TP1_CLOSE_RATIO", 0.5)) @@ -212,7 +250,12 @@ def _execute_tp1_partial(self, *, trade_id: str, trade: dict, price: float, now: print(f"🎯 TP1 PARTIAL CLOSE {trade_id}: {close_vol}/{total_vol}") - self.adapter.close_partial(ticket=trade["ticket"], volume=close_vol, price=price) + if not self._is_dry_run(): + self.adapter.close_partial( + ticket=trade["ticket"], + volume=close_vol, + price=price, + ) self.state.mark_tp1_executed( trade_id=trade_id, @@ -226,7 +269,10 @@ def _try_move_sl_to_be(self, *, trade_id: str, trade: dict) -> None: current_sl = float(trade["sl"]) direction = trade["direction"] - already_be = (direction == "long" and current_sl >= entry) or (direction == "short" and current_sl <= entry) + already_be = ( + (direction == "long" and current_sl >= entry) + or (direction == "short" and current_sl <= entry) + ) if already_be: return @@ -234,7 +280,13 @@ def _try_move_sl_to_be(self, *, trade_id: str, trade: dict) -> None: self.state.update_sl(trade_id=trade_id, 
new_sl=entry) self.state.set_flag(trade_id=trade_id, key="be_moved", value=True) - def _handle_trailing_sl(self, *, trade_id: str, trade: dict, market_state: dict) -> None: + def _handle_trailing_sl( + self, + *, + trade_id: str, + trade: dict, + market_state: dict, + ) -> None: cfg = trade.get("strategy_config", {}) if not cfg.get("USE_TRAILING"): return @@ -255,7 +307,10 @@ def _handle_trailing_sl(self, *, trade_id: str, trade: dict, market_state: dict) current_sl = float(trade["sl"]) direction = trade["direction"] - improved = (direction == "long" and candidate > current_sl) or (direction == "short" and candidate < current_sl) + improved = ( + (direction == "long" and candidate > current_sl) + or (direction == "short" and candidate < current_sl) + ) if not improved: return @@ -271,23 +326,16 @@ def _handle_engine_exit( price: float, now: datetime, ) -> bool: - """ - Engine-managed exits: - - SL is always engine-managed (we close if price crosses SL) - - TP2 can be engine-managed or broker-managed - """ - # If TP2 is broker-managed, we still allow engine check as a safety net. exit_res = LiveExitRules.check_exit(trade=trade, price=price, now=now) if exit_res is None: return False - # Respect TP2 executor if you want strict behavior: - if exit_res.reason == "TP2" and execution.tp2 != "ENGINE": - # broker should close it; keep it as safety net? choose: - # return False # strict - pass # permissive safety net (default) + if not self._is_dry_run(): + self.adapter.close_position( + ticket=trade["ticket"], + price=exit_res.exit_price, + ) - self.adapter.close_position(ticket=trade["ticket"], price=exit_res.exit_price) self.state.record_exit( trade_id=trade_id, price=exit_res.exit_price, @@ -298,12 +346,8 @@ def _handle_engine_exit( return True def _get_execution(self, trade: dict) -> ExitExecution: - """ - Prefer persisted execution policy from repo; fallback to strategy_config. 
- """ raw = trade.get("exit_execution") if isinstance(raw, dict): - # reconstruct via config to keep defaults consistent return ExitExecution.from_config({"EXIT_EXECUTION": raw}) cfg = trade.get("strategy_config", {}) diff --git a/core/live_trading/logging.py b/core/live_trading/logging.py new file mode 100644 index 0000000..5a3ca77 --- /dev/null +++ b/core/live_trading/logging.py @@ -0,0 +1,21 @@ +from core.logging.run_logger import RunLogger +from core.logging.config import LoggerConfig +from core.logging.prefix import LOG_PREFIX + + +def create_live_logger(symbol: str) -> RunLogger: + cfg = LoggerConfig( + stdout=True, + file=False, + timing=False, + profiling=False, + ) + + return ( + RunLogger( + name="live", + cfg=cfg, + prefix=LOG_PREFIX["LIVE"], + ) + .with_context(symbol=symbol) + ) \ No newline at end of file diff --git a/core/live_trading/market_state.py b/core/live_trading/market_state.py new file mode 100644 index 0000000..51f26c1 --- /dev/null +++ b/core/live_trading/market_state.py @@ -0,0 +1,20 @@ +from abc import ABC, abstractmethod +from typing import Optional, Dict, Any + + +class MarketStateProvider(ABC): + """ + Provides market state events for the LiveEngine. + """ + + @abstractmethod + def poll(self) -> Optional[Dict[str, Any]]: + """ + Returns: + { + "price": float, + "time": datetime, + "candle_time": datetime | None + } + """ + ... \ No newline at end of file diff --git a/core/live_trading/mt5_market_state.py b/core/live_trading/mt5_market_state.py new file mode 100644 index 0000000..2546124 --- /dev/null +++ b/core/live_trading/mt5_market_state.py @@ -0,0 +1,38 @@ +import MetaTrader5 as mt5 +import pandas as pd +from datetime import datetime + +from core.live_trading.market_state import MarketStateProvider +from core.utils.timeframe import MT5_TIMEFRAME_MAP + + +class MT5MarketStateProvider(MarketStateProvider): + """ + Polls MT5 for last closed candle. 
+ """ + + def __init__(self, *, symbol: str, timeframe: str): + self.symbol = symbol + self.timeframe = timeframe + self._last_candle_time = None + + def poll(self): + tf = MT5_TIMEFRAME_MAP[self.timeframe] + rates = mt5.copy_rates_from_pos(self.symbol, tf, 0, 2) + + if rates is None or len(rates) < 2: + return None + + last_closed = rates[-2] + candle_time = pd.to_datetime( + last_closed["time"], unit="s", utc=True + ) + + is_new_candle = candle_time != self._last_candle_time + self._last_candle_time = candle_time + + return { + "price": float(last_closed["close"]), + "time": candle_time, + "candle_time": candle_time if is_new_candle else None, + } \ No newline at end of file diff --git a/core/live_trading/run_trading.py b/core/live_trading/run_trading.py index 958bdca..69806ec 100644 --- a/core/live_trading/run_trading.py +++ b/core/live_trading/run_trading.py @@ -5,183 +5,95 @@ lookback_to_bars, MT5Client, ) +from core.data_provider.providers.live_provider import LiveStrategyDataProvider from core.live_trading.engine import LiveEngine -from core.live_trading.strategy_adapter import LiveStrategyAdapter -from core.live_trading.execution import PositionManager -from core.live_trading.execution import MT5Adapter +from core.live_trading.execution.mt5_adapter import MT5Adapter +from core.live_trading.execution.position_manager import PositionManager +from core.live_trading.logging import create_live_logger +from core.live_trading.mt5_market_state import MT5MarketStateProvider +from core.live_trading.strategy_runner import LiveStrategyRunner + from core.live_trading.trade_repo import TradeRepo from core.live_trading.strategy_loader import load_strategy_class -from core.utils.lookback import LOOKBACK_CONFIG, MIN_HTF_BARS -from core.utils.timeframe import MT5_TIMEFRAME_MAP +from core.logging.config import LoggerConfig +from core.logging.prefix import LOG_PREFIX +from core.logging.run_logger import RunLogger +from core.utils.lookback import LOOKBACK_CONFIG class 
LiveTradingRunner: """ - MT5 live trading runner. - Symmetric API to BacktestRunner. + Live trading application runner. """ def __init__(self, cfg): self.cfg = cfg + self.log = create_live_logger(cfg.SYMBOLS) - self.engine = None - self.strategy = None - self.provider = None - - # ================================================== - # 1️⃣ MT5 INIT - # ================================================== + def run(self): + self.log.info("starting live trading runner") - def _init_mt5(self): if not mt5.initialize(): - raise RuntimeError(f"MT5 init failed: {mt5.last_error()}") - - if not mt5.symbol_select(self.cfg.SYMBOLS, True): - raise RuntimeError( - f"Symbol not available: {self.cfg.SYMBOLS}" - ) - - info = mt5.account_info() - print( - f"🟢 MT5 connected | " - f"Account={info.login} " - f"Balance={info.balance}" - ) - - # ================================================== - # 2️⃣ INITIAL DATA (WARMUP) - # ================================================== - - def _load_initial_data(self) -> pd.DataFrame: - tf = MT5_TIMEFRAME_MAP[self.cfg.TIMEFRAME] - lookback = LOOKBACK_CONFIG[self.cfg.TIMEFRAME] - bars = lookback_to_bars(self.cfg.TIMEFRAME, lookback) - - rates = mt5.copy_rates_from_pos( - self.cfg.SYMBOLS, tf, 0, bars - ) - - if rates is None or len(rates) == 0: - raise RuntimeError("Initial MT5 data fetch failed") - - df = pd.DataFrame(rates) - df["time"] = pd.to_datetime(df["time"], unit="s", utc=True) + self.log.error("MT5 init failed") + raise RuntimeError("MT5 init failed") - print( - f"📦 Warmup loaded | " - f"{len(df)} candles ({self.cfg.TIMEFRAME})" + mt5.symbol_select(self.cfg.SYMBOLS, True) + self.log.info( + "MT5 initialized", ) - return df - - # ================================================== - # 3️⃣ INFORMATIVE PROVIDER - # ================================================== - - def _build_provider(self) -> MT5Client: - StrategyClass = load_strategy_class( - self.cfg.STRATEGY_CLASS - ) + StrategyClass = load_strategy_class(self.cfg.STRATEGY_CLASS) 
bars_per_tf = {} - for tf in StrategyClass.get_required_informatives(): + for tf in [self.cfg.TIMEFRAME] + StrategyClass.get_required_informatives(): lookback = LOOKBACK_CONFIG[tf] - bars_per_tf[tf] = max( - lookback_to_bars(tf, lookback), - MIN_HTF_BARS.get(tf, 0), - ) + bars_per_tf[tf] = lookback_to_bars(tf, lookback) - provider = MT5Client(bars_per_tf=bars_per_tf) - - return provider + self.log.debug( + "bars_per_tf built", + # jeśli chcesz, możesz to dać do contextu + ) - # ================================================== - # 4️⃣ STRATEGY - # ================================================== + client = MT5Client() + data_provider = LiveStrategyDataProvider( + client=client, + bars_per_tf=bars_per_tf, + ) - def _build_strategy(self, df_execution: pd.DataFrame): - self.strategy = load_strategy_class(self.cfg.STRATEGY_CLASS)( - df=df_execution, + strategy = StrategyClass( + df=None, symbol=self.cfg.SYMBOLS, startup_candle_count=self.cfg.STARTUP_CANDLE_COUNT, ) - self.strategy.validate() - - # ================================================== - # 5️⃣ ENGINE - # ================================================== - - def _build_engine(self): - - adapter = MT5Adapter(dry_run=self.cfg.DRY_RUN) - repo = TradeRepo() - pm = PositionManager(repo=repo, adapter=adapter) - - strategy_adapter = LiveStrategyAdapter( - strategy=self.strategy, - provider=self.provider, + strategy_runner = LiveStrategyRunner( + strategy=strategy, + data_provider=data_provider, symbol=self.cfg.SYMBOLS, - startup_candle_count=self.cfg.STARTUP_CANDLE_COUNT, - df_execution=self.df_execution, ) - tf = MT5_TIMEFRAME_MAP[self.cfg.TIMEFRAME] - - def market_data_provider(): - rates = mt5.copy_rates_from_pos( - self.cfg.SYMBOLS, tf, 0, 2 - ) - if rates is None or len(rates) < 2: - return None - - last_closed = rates[-2] - - candle_ts = pd.to_datetime( - last_closed["time"], unit="s", utc=True - ) + market_state_provider = MT5MarketStateProvider( + symbol=self.cfg.SYMBOLS, + timeframe=self.cfg.TIMEFRAME, 
+ ) - return { - "price": float(last_closed["close"]), - "time": candle_ts, # logical event time - "candle_time": candle_ts # SAME TYPE, SAME MEANING - } + adapter = MT5Adapter( + dry_run=self.cfg.DRY_RUN, + log=self.log.with_context(component="adapter"), + ) + repo = TradeRepo() + pm = PositionManager(repo=repo, adapter=adapter) - self.engine = LiveEngine( + engine = LiveEngine( position_manager=pm, - market_data_provider=market_data_provider, - strategy_adapter=strategy_adapter, + market_state_provider=market_state_provider, + strategy_runner=strategy_runner, tick_interval_sec=self.cfg.TICK_INTERVAL_SEC, ) - print("⚙️ LiveEngine ready") - - # ================================================== - # 6️⃣ RUN - # ================================================== - - def run(self): - print("🚀 LiveTradingRunner start") - - self._init_mt5() - self.df_execution = self._load_initial_data() - self.provider = self._build_provider() - - self._build_strategy(self.df_execution) - self._build_engine() - - print( - f"🚀 LIVE TRADING STARTED | " - f"{self.cfg.SYMBOLS} {self.cfg.TIMEFRAME} " - f"DRY_RUN={self.cfg.DRY_RUN}" - ) - - self.engine.start() - - # ================================================== - # 7️⃣ SHUTDOWN - # ================================================== + self.log.with_context( + timeframe=self.cfg.TIMEFRAME, + dry_run=self.cfg.DRY_RUN, + ).info("LIVE STARTED") - def shutdown(self): - mt5.shutdown() - print("🔴 MT5 shutdown") \ No newline at end of file + engine.start() \ No newline at end of file diff --git a/core/live_trading/strategy_adapter.py b/core/live_trading/strategy_adapter.py deleted file mode 100644 index e70f115..0000000 --- a/core/live_trading/strategy_adapter.py +++ /dev/null @@ -1,75 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass -from typing import Any, Optional - -import pandas as pd - -from core.strategy.orchestration.strategy_execution import execute_strategy -from core.strategy.plan_builder import 
PlanBuildContext -from core.strategy.trade_plan import TradePlan - - -@dataclass(frozen=True) -class StrategyCandleResult: - df_plot: pd.DataFrame - last_row: pd.Series - plan: TradePlan | None - - -class LiveStrategyAdapter: - """ - Live orchestration adapter. - - Responsibilities: - - execute vector strategy update on df_execution - - produce entry plan from last row using BaseStrategy default builder - """ - - def __init__( - self, - *, - strategy, - provider, - symbol: str, - startup_candle_count: int, - df_execution: pd.DataFrame, - ): - self.strategy = strategy - self.provider = provider - self.symbol = symbol - self.startup_candle_count = startup_candle_count - self.df_execution = df_execution - - self._last_df_plot: Optional[pd.DataFrame] = None - - def on_new_candle(self) -> StrategyCandleResult: - df_plot = execute_strategy( - strategy=self.strategy, - df=self.df_execution, - provider=self.provider, - symbol=self.symbol, - startup_candle_count=self.startup_candle_count, - ) - - if df_plot is None or df_plot.empty: - # hard guard: strategy must return df - raise RuntimeError("execute_strategy returned empty df") - - last_row = df_plot.iloc[-1] - - ctx = PlanBuildContext( - symbol=self.symbol, - strategy_name=type(self.strategy).__name__, - strategy_config=getattr(self.strategy, "strategy_config", {}) or {}, - ) - - # Expect BaseStrategy provides this default method. 
- build_fn = getattr(self.strategy, "build_trade_plan_live", None) - plan = build_fn(row=last_row, ctx=ctx) if callable(build_fn) else None - - self._last_df_plot = df_plot - return StrategyCandleResult(df_plot=df_plot, last_row=last_row, plan=plan) - - def last_df_plot(self) -> Optional[pd.DataFrame]: - return self._last_df_plot \ No newline at end of file diff --git a/core/live_trading/strategy_runner.py b/core/live_trading/strategy_runner.py new file mode 100644 index 0000000..44086b7 --- /dev/null +++ b/core/live_trading/strategy_runner.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +from typing import Optional + +import pandas as pd + +from core.strategy.orchestration.informatives import apply_informatives +from core.strategy.plan_builder import PlanBuildContext +from core.utils.timeframe import tf_to_minutes + + + + +class StrategyCandleResult: + def __init__(self, *, last_row: pd.Series, plan): + self.last_row = last_row + self.plan = plan + + +class LiveStrategyRunner: + """ + Runs strategy on each new candle. 
+ """ + + def __init__( + self, + *, + strategy, + data_provider, + symbol: str, + ): + self.strategy = strategy + self.data_provider = data_provider + self.symbol = symbol + self._last_df: Optional[pd.DataFrame] = None + + def run(self) -> StrategyCandleResult: + data_by_tf = self.data_provider.fetch(self.symbol) + + base_tf = min(data_by_tf.keys(), key=tf_to_minutes) + df_base = data_by_tf[base_tf] + + df_context = apply_informatives( + df=df_base, + strategy=self.strategy, + data_by_tf=data_by_tf, + ) + + self.strategy.df = df_context + self.strategy.populate_indicators() + self.strategy.populate_entry_trend() + self.strategy.populate_exit_trend() + + last_row = df_context.iloc[-1] + + ctx = PlanBuildContext( + symbol=self.symbol, + strategy_name=self.strategy.get_strategy_name(), + strategy_config=self.strategy.strategy_config, + ) + + plan = self.strategy.build_trade_plan_live( + row=last_row, + ctx=ctx, + ) + + self._last_df = df_context + + return StrategyCandleResult( + last_row=last_row, + plan=plan, + ) \ No newline at end of file diff --git a/core/live_trading/tests/conftest.py b/core/live_trading/tests/conftest.py new file mode 100644 index 0000000..efd5a4c --- /dev/null +++ b/core/live_trading/tests/conftest.py @@ -0,0 +1,6 @@ +import pytest +from datetime import datetime, timezone + +@pytest.fixture +def fixed_now(): + return datetime(2025, 1, 1, 12, 0, tzinfo=timezone.utc) \ No newline at end of file diff --git a/core/live_trading/tests/live_state/active_trades.json b/core/live_trading/tests/live_state/active_trades.json deleted file mode 100644 index 9e26dfe..0000000 --- a/core/live_trading/tests/live_state/active_trades.json +++ /dev/null @@ -1 +0,0 @@ -{} \ No newline at end of file diff --git a/core/live_trading/tests/live_state/closed_trades.json b/core/live_trading/tests/live_state/closed_trades.json deleted file mode 100644 index f0fb124..0000000 --- a/core/live_trading/tests/live_state/closed_trades.json +++ /dev/null @@ -1,142 +0,0 @@ -{ - 
"369957004": { - "trade_id": 369957004, - "symbol": "BTCUSD", - "direction": "long", - "entry_price": 95270.97, - "volume": 0.0, - "sl": 95260.3700304329, - "tp1": 95281.5699695671, - "tp2": 95292.16993913421, - "tp1_executed": false, - "entry_time": "2026-01-17T20:20:04.675410", - "entry_tag": "long_trend1", - "strategy": "Hts", - "strategy_config": {}, - "ticket": 369957004, - "exit_price": 95292.16993913421, - "exit_time": "2026-01-17 20:26:47.837174", - "exit_reason": "BROKER_CLOSED", - "exit_level_tag": "TP2_live" - }, - "369957133": { - "trade_id": 369957133, - "symbol": "BTCUSD", - "direction": "long", - "entry_price": 95293.39, - "volume": 0.0, - "sl": 95281.19661899896, - "tp1": 95305.58338100104, - "tp2": 95317.77676200207, - "tp1_executed": false, - "entry_time": "2026-01-17T20:26:47.837174", - "entry_tag": "long_trend1", - "strategy": "Hts", - "strategy_config": {}, - "ticket": 369957133, - "exit_price": 95317.77676200207, - "exit_time": "2026-01-17 20:28:56.273059", - "exit_reason": "BROKER_CLOSED", - "exit_level_tag": "TP2_live" - }, - "369957207": { - "trade_id": 369957207, - "symbol": "BTCUSD", - "direction": "short", - "entry_price": 95288.48, - "volume": 0.0, - "sl": 95307.13210570674, - "tp1": 95269.82789429325, - "tp2": 95251.17578858651, - "tp1_executed": false, - "entry_time": "2026-01-17T20:29:14.113469", - "entry_tag": "short_trend1", - "strategy": "Hts", - "strategy_config": {}, - "ticket": 369957207, - "exit_price": 95251.17578858651, - "exit_time": "2026-01-17 20:32:20.897390", - "exit_reason": "BROKER_CLOSED", - "exit_level_tag": "TP2_live" - }, - "369957531": { - "trade_id": 369957531, - "symbol": "BTCUSD", - "direction": "short", - "entry_price": 95256.22, - "volume": 0.0, - "sl": 95271.83845894576, - "tp1": 95240.60154105425, - "tp2": 95224.98308210849, - "tp1_executed": false, - "entry_time": "2026-01-17T20:39:40.342580", - "entry_tag": "short_trend1", - "strategy": "Hts", - "strategy_config": {}, - "ticket": 369957531, - 
"exit_price": 95224.98308210849, - "exit_time": "2026-01-17 20:42:23.287946", - "exit_reason": "BROKER_CLOSED", - "exit_level_tag": "TP2_live" - }, - "369957556": { - "trade_id": 369957556, - "symbol": "BTCUSD", - "direction": "long", - "entry_price": 95264.67, - "volume": 0.0, - "sl": 95250.8447396852, - "tp1": 95278.4952603148, - "tp2": 95292.3205206296, - "tp1_executed": false, - "entry_time": "2026-01-17T20:42:23.287946", - "entry_tag": "long_trend1", - "strategy": "Hts", - "strategy_config": {}, - "ticket": 369957556, - "exit_price": 95292.3205206296, - "exit_time": "2026-01-17 20:44:14.323056", - "exit_reason": "BROKER_CLOSED", - "exit_level_tag": "TP2_live" - }, - "369957774": { - "trade_id": 369957774, - "symbol": "BTCUSD", - "direction": "long", - "entry_price": 95270.41, - "volume": 0.0, - "sl": 95257.81345265859, - "tp1": 95283.00654734141, - "tp2": 95295.60309468283, - "tp1_executed": false, - "entry_time": "2026-01-17T20:44:14.323056", - "entry_tag": "long_trend1", - "strategy": "Hts", - "strategy_config": {}, - "ticket": 369957774, - "exit_price": 95295.60309468283, - "exit_time": "2026-01-17 20:45:11.186981", - "exit_reason": "BROKER_CLOSED", - "exit_level_tag": "TP2_live" - }, - "369958228": { - "trade_id": 369958228, - "symbol": "BTCUSD", - "direction": "long", - "entry_price": 95325.68, - "volume": 0.0, - "sl": 95243.75358716586, - "tp1": 95407.60641283412, - "tp2": 95489.53282566826, - "tp1_executed": false, - "entry_time": "2026-01-17T20:51:02.265531", - "entry_tag": "long_trend1", - "strategy": "Hts", - "strategy_config": {}, - "ticket": 369958228, - "exit_price": 95489.53282566826, - "exit_time": "2026-01-18 11:22:00+00:00", - "exit_reason": "BROKER_CLOSED", - "exit_level_tag": "TP2_live" - } -} \ No newline at end of file diff --git a/core/live_trading/tests/mocks.py b/core/live_trading/tests/mocks.py deleted file mode 100644 index 286c844..0000000 --- a/core/live_trading/tests/mocks.py +++ /dev/null @@ -1,32 +0,0 @@ -from datetime import 
datetime - - -def market_data_provider_mock(): - return { - "price": 1.1000, - "time": datetime.utcnow(), - } - - -def signal_provider_mock_once(): - """ - Zwraca jeden sygnał, potem pustą listę - """ - yielded = {"done": False} - - def _provider(): - if yielded["done"]: - return [] - yielded["done"] = True - return [{ - "symbol": "EURUSD", - "direction": "long", - "volume": 0.1, - "entry_price": 1.1000, - "sl": 1.0950, - "tp1": 1.1020, - "tp2": 1.1050, - "entry_tag": "entry_test", - }] - - return _provider diff --git a/core/live_trading/tests/test_calculate_volume.py b/core/live_trading/tests/test_calculate_volume.py new file mode 100644 index 0000000..055bce4 --- /dev/null +++ b/core/live_trading/tests/test_calculate_volume.py @@ -0,0 +1,22 @@ +from core.live_trading.execution.risk.sizing import LiveSizer + + +def test_calculate_volume_positive(mocker): + mocker.patch( + "core.live_trading.execution.risk.sizing.LiveSizer.get_account_size", + return_value=10_000, + ) + + mocker.patch( + "core.live_trading.execution.risk.sizing.Mt5RiskParams.get_symbol_risk_params", + return_value=(0.0001, 10), + ) + + vol = LiveSizer.calculate_volume( + symbol="EURUSD", + entry_price=1.2000, + sl=1.1900, + max_risk=0.01, + ) + + assert vol > 0 \ No newline at end of file diff --git a/core/live_trading/tests/test_full_flow.py b/core/live_trading/tests/test_full_flow.py new file mode 100644 index 0000000..80708e9 --- /dev/null +++ b/core/live_trading/tests/test_full_flow.py @@ -0,0 +1,25 @@ +from core.live_trading.engine import LiveEngine + + +def test_live_engine_full_flow(mocker, fixed_now): + market = mocker.Mock() + market.poll.return_value = { + "price": 100, + "time": fixed_now, + "candle_time": fixed_now, + } + + pm = mocker.Mock() + runner = mocker.Mock() + runner.run.return_value = mocker.Mock(plan="PLAN", last_row={}) + + engine = LiveEngine( + position_manager=pm, + market_state_provider=market, + strategy_runner=runner, + tick_interval_sec=0, + ) + + engine._tick() + + 
pm.on_trade_plan.assert_called_once() \ No newline at end of file diff --git a/core/live_trading/tests/test_live_engine.py b/core/live_trading/tests/test_live_engine.py new file mode 100644 index 0000000..bb6d561 --- /dev/null +++ b/core/live_trading/tests/test_live_engine.py @@ -0,0 +1,67 @@ +from core.live_trading.engine import LiveEngine + + +def test_live_engine_calls_on_tick_every_loop(mocker, fixed_now): + market = mocker.Mock() + market.poll.return_value = { + "price": 100.0, + "time": fixed_now, + "candle_time": None, + } + + pm = mocker.Mock() + runner = mocker.Mock() + + engine = LiveEngine( + position_manager=pm, + market_state_provider=market, + strategy_runner=runner, + tick_interval_sec=0, + ) + + engine._tick() + + pm.on_tick.assert_called_once() + runner.run.assert_not_called() + +def test_strategy_runs_only_on_new_candle(mocker, fixed_now): + market = mocker.Mock() + market.poll.return_value = { + "price": 100.0, + "time": fixed_now, + "candle_time": fixed_now, + } + + pm = mocker.Mock() + runner = mocker.Mock() + runner.run.return_value = mocker.Mock(plan=None, last_row={}) + + engine = LiveEngine( + position_manager=pm, + market_state_provider=market, + strategy_runner=runner, + tick_interval_sec=0, + ) + + engine._tick() + + runner.run.assert_called_once() + +def test_engine_skips_when_market_state_none(mocker): + market = mocker.Mock() + market.poll.return_value = None + + pm = mocker.Mock() + runner = mocker.Mock() + + engine = LiveEngine( + position_manager=pm, + market_state_provider=market, + strategy_runner=runner, + tick_interval_sec=0, + ) + + engine._tick() + + pm.on_tick.assert_not_called() + runner.run.assert_not_called() \ No newline at end of file diff --git a/core/live_trading/tests/test_position_manager.py b/core/live_trading/tests/test_position_manager.py new file mode 100644 index 0000000..8aba13c --- /dev/null +++ b/core/live_trading/tests/test_position_manager.py @@ -0,0 +1,71 @@ +from 
core.live_trading.execution.position_manager import PositionManager + + +def test_position_manager_blocks_duplicate_entry(mocker): + repo = mocker.Mock() + repo.load_active.return_value = {"1": {"symbol": "EURUSD"}} + + adapter = mocker.Mock() + pm = PositionManager(repo=repo, adapter=adapter) + + plan = mocker.Mock(symbol="EURUSD") + + pm.on_trade_plan(plan=plan, market_state={}) + + adapter.open_position.assert_not_called() + +def test_exit_on_sl(mocker, fixed_now): + repo = mocker.Mock() + repo.load_active.return_value = { + "1": { + "trade_id": "1", + "direction": "long", + "sl": 99.0, + "tp2": None, + "entry_time": fixed_now, + "ticket": "1", + } + } + + adapter = mocker.Mock(dry_run=True) + pm = PositionManager(repo=repo, adapter=adapter) + + pm.on_tick( + market_state={ + "price": 98.0, + "time": fixed_now, + } + ) + + repo.record_exit.assert_called_once() + +def test_tp1_partial_and_be(mocker, fixed_now): + repo = mocker.Mock() + repo.load_active.return_value = { + "1": { + "trade_id": "1", + "direction": "long", + "entry_price": 100, + "sl": 95, + "tp1": 105, + "tp1_executed": False, + "volume": 1.0, + "ticket": "1", + "strategy_config": {"TP1_CLOSE_RATIO": 0.5}, + } + } + + adapter = mocker.Mock(dry_run=True) + pm = PositionManager(repo=repo, adapter=adapter) + + # 🔧 KLUCZOWE: podmieniamy state + pm.state = mocker.Mock() + + pm.on_tick( + market_state={ + "price": 106, + "time": fixed_now, + } + ) + + pm.state.mark_tp1_executed.assert_called_once() \ No newline at end of file diff --git a/core/live_trading/tests/test_strategy_run.py b/core/live_trading/tests/test_strategy_run.py new file mode 100644 index 0000000..037b78d --- /dev/null +++ b/core/live_trading/tests/test_strategy_run.py @@ -0,0 +1,54 @@ +from datetime import datetime, timezone +from unittest.mock import Mock + +import pandas as pd + +from core.live_trading.strategy_runner import LiveStrategyRunner + + +class DummyStrategy: + def __init__(self): + self.df = None + self.strategy_config = {} 
+ + def get_required_informatives(self): + return [] + + def populate_indicators(self): pass + def populate_entry_trend(self): pass + def populate_exit_trend(self): pass + + def get_strategy_name(self): + return "dummy" + + def build_trade_plan_live(self, row, ctx): + return "PLAN" + + + +def test_live_strategy_runner_returns_last_row_and_plan(): + df = pd.DataFrame( + { + "time": [ + datetime(2025, 1, 1, 12, 0, tzinfo=timezone.utc), + datetime(2025, 1, 1, 12, 5, tzinfo=timezone.utc), + ], + "signal": ["long", "long"], + } + ) + + provider = Mock() + provider.fetch.return_value = {"M5": df} + + strategy = DummyStrategy() + + runner = LiveStrategyRunner( + strategy=strategy, + data_provider=provider, + symbol="EURUSD", + ) + + result = runner.run() + + assert result.plan == "PLAN" + assert result.last_row["signal"] == "long" \ No newline at end of file diff --git a/core/live_trading/tests/test_trade_repo.py b/core/live_trading/tests/test_trade_repo.py new file mode 100644 index 0000000..f9978dd --- /dev/null +++ b/core/live_trading/tests/test_trade_repo.py @@ -0,0 +1,25 @@ +from datetime import datetime + +from core.live_trading.trade_repo import TradeRepo + + +def test_trade_repo_is_restart_safe(tmp_path): + repo = TradeRepo(data_dir=tmp_path) + + repo.record_entry( + trade_id="1", + symbol="EURUSD", + direction="long", + entry_price=100, + volume=1, + sl=95, + tp1=105, + tp2=110, + entry_time=datetime.utcnow(), + entry_tag="test", + ) + + repo2 = TradeRepo(data_dir=tmp_path) + + active = repo2.load_active() + assert "1" in active \ No newline at end of file diff --git a/core/live_trading/tests/test_trade_state.py b/core/live_trading/tests/test_trade_state.py new file mode 100644 index 0000000..d7c34e1 --- /dev/null +++ b/core/live_trading/tests/test_trade_state.py @@ -0,0 +1,27 @@ +from core.live_trading.execution.live.trade_state_service import TradeStateService + + +def test_has_active_position_true(mocker): + repo = mocker.Mock() + repo.load_active.return_value 
= { + "1": {"symbol": "EURUSD"} + } + + svc = TradeStateService(repo=repo, adapter=mocker.Mock()) + + assert svc.has_active_position("EURUSD") is True + + +def test_update_sl_updates_repo_and_calls_adapter(mocker): + repo = mocker.Mock() + repo.load_active.return_value = { + "1": {"ticket": "1", "sl": 95} + } + + adapter = mocker.Mock() + svc = TradeStateService(repo=repo, adapter=adapter) + + svc.update_sl(trade_id="1", new_sl=100) + + adapter.modify_sl.assert_called_once() + repo.save_active.assert_called_once() \ No newline at end of file diff --git a/core/logging/__init__.py b/core/logging/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/logging/config.py b/core/logging/config.py new file mode 100644 index 0000000..8bb23ca --- /dev/null +++ b/core/logging/config.py @@ -0,0 +1,12 @@ +from dataclasses import dataclass +from pathlib import Path + + +@dataclass +class LoggerConfig: + stdout: bool = True + file: bool = False + timing: bool = True + profiling: bool = False + log_dir: Path | None = None + level: int = 20 \ No newline at end of file diff --git a/core/logging/null_logger.py b/core/logging/null_logger.py new file mode 100644 index 0000000..52ddae6 --- /dev/null +++ b/core/logging/null_logger.py @@ -0,0 +1,23 @@ +from contextlib import contextmanager + + +class NullLogger: + def debug(self, msg: str): pass + def info(self, msg: str): pass + def warning(self, msg: str): pass + def error(self, msg: str): pass + def log(self, msg: str): pass + + def with_context(self, **ctx): + return self + + @contextmanager + def time(self, label: str): + yield + + @contextmanager + def section(self, name: str): + yield + + def get_timings(self) -> dict[str, float]: + return {} \ No newline at end of file diff --git a/core/logging/prefix.py b/core/logging/prefix.py new file mode 100644 index 0000000..73771af --- /dev/null +++ b/core/logging/prefix.py @@ -0,0 +1,8 @@ +LOG_PREFIX = { + "DATA": "📈 DATA |", + "BT": "🧪 BACKTEST |", + "STRAT": "📐 
STRATEGY |", + "REPORT": "📊 REPORT |", + "RUNNER": "🚀 RUN |", + "LIVE": "🟢 LIVE |", +} \ No newline at end of file diff --git a/core/logging/profiling.py b/core/logging/profiling.py new file mode 100644 index 0000000..db0c374 --- /dev/null +++ b/core/logging/profiling.py @@ -0,0 +1,18 @@ +from contextlib import contextmanager +import cProfile +import pstats + + +@contextmanager +def profiling(enabled: bool, path): + if not enabled: + yield + return + + pr = cProfile.Profile() + pr.enable() + yield + pr.disable() + stats = pstats.Stats(pr) + stats.sort_stats("cumtime") + stats.dump_stats(path) \ No newline at end of file diff --git a/core/logging/run_logger.py b/core/logging/run_logger.py new file mode 100644 index 0000000..945e8f9 --- /dev/null +++ b/core/logging/run_logger.py @@ -0,0 +1,152 @@ + +import logging +from contextlib import contextmanager +from time import perf_counter + + +from core.logging.config import LoggerConfig + + +class RunLogger: + """ + Unified logger for backtest and live. + + Features: + - stdout / file logging + - log levels (debug/info/warning/error) + - structured context (symbol, trade_id, etc.) 
+ - timing (step, time, section) + - optional profiling + """ + + def __init__( + self, + name: str, + cfg: LoggerConfig, + prefix: str = "", + *, + context: dict | None = None, + logger: logging.Logger | None = None, + ): + self.cfg = cfg + self.name = name + self.prefix = prefix + self.context = context or {} + + self._t0 = perf_counter() + self._t_last = self._t0 + self._timings: dict[str, float] = {} + + if logger is not None: + # child logger with shared handlers + self.logger = logger + return + + self.logger = logging.getLogger(name) + self.logger.setLevel(cfg.level) + self.logger.handlers.clear() + self.logger.propagate = False + + if cfg.stdout: + h = logging.StreamHandler() + h.setFormatter(logging.Formatter("%(message)s")) + self.logger.addHandler(h) + + if cfg.file: + if cfg.log_dir is None: + raise ValueError("LoggerConfig.file=True requires log_dir") + + cfg.log_dir.mkdir(parents=True, exist_ok=True) + path = cfg.log_dir / f"{name}.log" + + fh = logging.FileHandler(path, encoding="utf-8") + fh.setFormatter( + logging.Formatter("%(asctime)s | %(message)s") + ) + self.logger.addHandler(fh) + + # ================================================== + # Core emit + # ================================================== + + def _emit(self, level: int, msg: str): + if self.prefix: + msg = f"{self.prefix} {msg}" + + if self.context: + ctx = " ".join(f"{k}={v}" for k, v in self.context.items()) + msg = f"{msg} | {ctx}" + + self.logger.log(level, msg) + + # ================================================== + # Public log API + # ================================================== + + def debug(self, msg: str): + self._emit(logging.DEBUG, msg) + + def info(self, msg: str): + self._emit(logging.INFO, msg) + + def warning(self, msg: str): + self._emit(logging.WARNING, msg) + + def error(self, msg: str): + self._emit(logging.ERROR, msg) + + # Backward compatibility + def log(self, msg: str): + self.info(msg) + + # ================================================== + # 
Context + # ================================================== + + def with_context(self, **ctx) -> "RunLogger": + return RunLogger( + name=self.name, + cfg=self.cfg, + prefix=self.prefix, + context={**self.context, **ctx}, + logger=self.logger, + ) + + # ================================================== + # Timing helpers + # ================================================== + + def step(self, label: str): + if not self.cfg.timing: + return + + now = perf_counter() + delta = now - self._t_last + total = now - self._t0 + + self.info( + f"⏱️ {label:<30} +{delta:6.2f}s | total {total:6.2f}s" + ) + self._t_last = now + + @contextmanager + def time(self, label: str): + if not self.cfg.timing: + yield + return + + t0 = perf_counter() + yield + dt = perf_counter() - t0 + self.info(f"⏱️ {label:<30} {dt:6.3f}s") + + @contextmanager + def section(self, name: str): + t0 = perf_counter() + yield + dt = perf_counter() - t0 + self._timings[name] = self._timings.get(name, 0.0) + dt + self.info(f"⏱️ section {name} {dt:6.3f}s") + + def get_timings(self) -> dict[str, float]: + return dict(self._timings) \ No newline at end of file diff --git a/core/logging/telegram_handler.py b/core/logging/telegram_handler.py new file mode 100644 index 0000000..ca34e02 --- /dev/null +++ b/core/logging/telegram_handler.py @@ -0,0 +1,3 @@ +class TelegramHandler(logging.Handler): + def emit(self, record): + send_to_telegram(self.format(record)) \ No newline at end of file diff --git a/core/strategy/orchestration/strategy_execution.py b/core/strategy/orchestration/strategy_execution.py index 835d39f..5c5e121 100644 --- a/core/strategy/orchestration/strategy_execution.py +++ b/core/strategy/orchestration/strategy_execution.py @@ -2,17 +2,13 @@ from core.strategy.orchestration.informatives import apply_informatives -def execute_strategy( - *, - strategy, - df: pd.DataFrame, - data_by_tf: dict[str, pd.DataFrame], -) -> pd.DataFrame: - """ - Shared strategy execution pipeline. 
- Used by backtest and live. - """ +def execute_strategy(*, strategy, df, **kwargs): + if kwargs: + raise TypeError( + f"execute_strategy does not accept extra args: {list(kwargs)}" + ) + df = apply_informatives( df=df, strategy=strategy, @@ -26,3 +22,5 @@ def execute_strategy( strategy.populate_exit_trend() return strategy.df + + diff --git a/core/strategy/tests/test_execute_strategy.py b/core/strategy/tests/test_execute_strategy.py deleted file mode 100644 index e0ea116..0000000 --- a/core/strategy/tests/test_execute_strategy.py +++ /dev/null @@ -1,32 +0,0 @@ -import pandas as pd -from core.strategy.orchestration.strategy_execution import execute_strategy -from Strategies.Samplestrategyreport import Samplestrategyreport -from core.strategy.tests.helper import DummyProvider - - -def test_execute_strategy_pipeline(): - df = pd.DataFrame({ - "time": pd.date_range( - "2024-01-01", periods=100, freq="1min", tz="UTC" - ), - "open": 100.0, - "high": 101.0, - "low": 99.0, - "close": 100.0, - "volume": 1.0, - }) - - strat = Samplestrategyreport( - df=df.copy(), - symbol="XAUUSD", - startup_candle_count=10, - ) - - out = execute_strategy( - strategy=strat, - df=df, - data_by_tf={"M30": df}, - - ) - - assert "signal_entry" in out.columns diff --git a/docs/architecture/pipelines.md b/docs/architecture/pipelines.md index 0058cad..757bdc2 100644 --- a/docs/architecture/pipelines.md +++ b/docs/architecture/pipelines.md @@ -9,147 +9,194 @@ README intentionally keeps pipelines simplified. ```mermaid flowchart LR - A[run] --> D - - %% ===================== - %% DATA - %% ===================== - subgraph D[Data Layer] - D1[Create backend] --> D2[OHLCV Provider + Cache] - D2 --> D3[Load OHLCV
per symbol] - D3 --> D4[all_data] - end - - %% ===================== - %% STRATEGY - %% ===================== - D4 --> S - subgraph S[Strategy Layer] - S1{Single / Multi symbol?} - S2[run_strategy_single] - S3[Parallel execution
ProcessPoolExecutor] - S4[signals_df] - S5[return dataframe
with entry signals
and exit plan] - - S1 -->|single| S2 --> S4 - S1 -->|multi| S3 --> S4 - S4 --> S5 - end - - %% ===================== - %% RESEARCH PLOT MODE - %% ===================== - S --> P{Plot charts
symbol only
research mode} - P -->|yes| PL[Plot charts
PNG artifacts] - PL --> END1[Exit] - - %% ===================== - %% BACKTEST - %% ===================== - P -->|no| B - subgraph B[Execution / Backtest] - B1{BACKTEST_MODE} - B2[Single window] - B3[Split windows] - B4[Backtester.run] - B5[return dataframe
with trades] - - B1 -->|single| B2 --> B4 - B1 -->|split| B3 --> B4 - B4 --> B5 - end - - %% ===================== - %% REPORT - %% ===================== - B --> R{Generate report?} - R -->|no| END2[Exit: backtest only] - R -->|yes| REP - - subgraph REP[Risk & Reporting] - R1[RiskDataPreparer] - R2[TradeContextEnricher] - R3[ReportRunner] - R4[Metrics, tables, charts] - R5[render reports:
stdout tables or html dashboard] - - R1 --> R2 --> R3 --> R4 --> R5 - end - - R5 --> END3[Exit: full run] + + RUN[BacktestRunner.run] + + RUN --> DATA[Data Loading] + DATA --> STRAT[Strategy Execution] + STRAT --> BT[Backtesting Engine] + BT --> RES[Result Build] + RES --> REP[Reporting] + + subgraph DATA[Data Layer] + DL1[BacktestStrategyDataProvider] + DL2[CsvMarketDataCache] + DL3[Backend Fetch] + DL1 --> DL2 + DL1 --> DL3 + end + + subgraph STRAT[Strategy Layer] + S1[run_strategies] + S2[run_strategy_single / parallel] + S3[apply_informatives] + S4[populate_indicators] + S5[populate_entry_trend] + S6[populate_exit_trend] + S7[build_trade_plans] + + S1 --> S2 + S2 --> S3 --> S4 --> S5 --> S6 --> S7 + end + + subgraph BT[Execution Layer] + B1[Backtester] + B2[run_backtests_single / parallel] + B4[execution_loop] + + B2 --> B1 + B1 --> B4 + end + + subgraph REP[Reporting Layer] + R1[ReportRunner] + R2[Per‑symbol reports] + R3[SummaryReportRunner] + + R1 --> R2 + R1 --> R3 + end ``` --- ## LiveTradingRunner — detailed flow ```mermaid flowchart LR - A[run] --> M - - %% ===================== - %% MT5 INIT - %% ===================== - subgraph M[MT5 Init] - M1[mt5.initialize] --> M2[symbol_select] - M2 --> M3[account_info
log balance] - end - - %% ===================== - %% WARMUP DATA - %% ===================== - M --> W - subgraph W[Warmup Data] - W1[Resolve timeframe + lookback] --> W2[copy_rates_from_pos
bars] - W2 --> W3[Build DataFrame
UTC time] - W3 --> W4[df_execution] - end - - %% ===================== - %% INFORMATIVE PROVIDER - %% ===================== - W4 --> I - subgraph I[Informative Provider] - I1[load_strategy_class] --> I2[get_required_informatives] - I2 --> I3[compute bars_per_tf
lookback + MIN_HTF_BARS] - I3 --> I4[MT5Client
bars_per_tf] - end - - %% ===================== - %% STRATEGY - %% ===================== - I4 --> S - W4 --> S - subgraph S[Strategy Layer] - S1[load_strategy
df_execution + symbol + startup_candle_count] --> S2[strategy] - end - - %% ===================== - %% ENGINE BUILD - %% ===================== - S2 --> E - subgraph E[Execution / Live Engine] - E1[MT5Adapter
dry_run flag] - E2[TradeRepo] - E3[PositionManager
repo + adapter] - E4[LiveStrategyAdapter
wrap strategy] - E5[market_data_provider
last closed candle] - E6[LiveEngine
tick_interval_sec] - - E1 --> E3 - E2 --> E3 - E4 --> E6 - E5 --> E6 - E3 --> E6 - end - - %% ===================== - %% RUN LOOP (conceptual) - %% ===================== - E --> R - subgraph R[Run Loop] - R1[engine.start] --> R2{Tick} - R2 --> R3[market_data_provider] - R3 --> R4[strategy_adapter
intents] - R4 --> R5[position_manager
orders/positions] - R5 --> R2 - end + + %% ================================================= + %% ENTRY POINT + %% ================================================= + A[LiveTradingRunner.run] + + %% ================================================= + %% INIT SECTION + %% ================================================= + subgraph INIT[Initialization] + + B[MT5 initialize] + C[Symbol select] + D[Load Strategy Class] + E[Build Live Logger] + + end + + A --> INIT + INIT --> B --> C + INIT --> D + INIT --> E + + %% ================================================= + %% DATA LAYER + %% ================================================= + subgraph DATA[Data Layer] + + F[Strategy declares required timeframes] + G[MT5Client] + H[LiveStrategyDataProvider] + I[MT5MarketStateProvider] + + end + + D --> F + G --> H + F --> H + A --> H + A --> I + + %% ================================================= + %% STRATEGY LAYER + %% ================================================= + subgraph STRATEGY[Strategy Layer] + + J[LiveStrategyRunner] + + subgraph DOMAIN[Strategy Domain *shared with backtest*] + + K[apply_informatives] + L[populate_indicators] + M[populate_entry_trend] + N[populate_exit_trend] + O[build_trade_plans] + + K --> L --> M --> N --> O + end + + end + + H --> J + D --> J + J --> K + + %% ================================================= + %% EXECUTION LAYER + %% ================================================= + subgraph EXECUTION[Execution Layer] + + P[LiveEngine] + Q[PositionManager] + R[TradeRepo / TradeStateService] + S[MT5Adapter] + T[MetaTrader5 API] + + end + + %% ================================================= + %% RUNTIME FLOW + %% ================================================= + + %% Market state into engine + I --> P + + %% Trade plans into engine + O --> P + + %% Engine orchestration + P --> Q + Q --> R + Q --> S + S --> T + + %% ================================================= + %% FEEDBACK LOOPS + %% 
================================================= + + T -->|broker-driven exits| Q + P -->|tick loop| P +``` + +## BacktestDataProvider — detailed flow +```mermaid +flowchart TB + %% ===== Nodes ===== + U[Caller / Runner] --> P[BacktestStrategyDataProvider.get_ohlcv] + + P -->|1 cache.coverage symbol, timeframe| C[CsvMarketDataCache] + C -->|returns: None or cov_start, cov_end| P + + %% ===== Decision: cache coverage ===== + P --> D1{Coverage exists?} + + D1 -- No --> B1[backend.fetch_ohlcv full range] + B1 --> N1[_validate/_normalize provider-side] + N1 -->|cache.save... if fetched| C + N1 --> R1[return merged DF] + + D1 -- Yes --> D2{Missing pre-range?} + D2 -- Yes --> B2[backend.fetch_ohlcv pre range] + B2 --> N2[_validate/_normalize] + N2 -->|cache.append pre if non-empty| C + + D2 -- No --> D3{Missing post-range?} + + D3 -- Yes --> B3[backend.fetch_ohlcv post range] + B3 --> N3[_validate/_normalize] + N3 -->|cache.append post if non-empty| C + + %% ===== Load middle part ===== + D3 -- No --> M[cache.load_range requested range] + N2 --> M + N3 --> M + + M --> R2[merge pre + mid + post] + R2 --> R3[return merged DF] ``` \ No newline at end of file diff --git a/docs/dev/how-to-run.md b/docs/dev/how-to-run.md index 9377b86..d86e694 100644 --- a/docs/dev/how-to-run.md +++ b/docs/dev/how-to-run.md @@ -1,36 +1,61 @@ # Developer setup -This document describes a minimal local setup for running the framework. +This document describes a **minimal local setup** required to run the framework +for backtesting, research and (optionally) live trading. + +The project is designed to run deterministically in offline mode on any OS, +with live trading supported via MetaTrader 5 on Windows. 
--- ## Requirements -- Python 3.10+ (recommended) -- Windows + MetaTrader 5 (for live trading via MT5) -- Any OS (for backtests, research, reporting) +### Core +- Python **3.10+** (recommended) +- Git + +### Backtesting / Research +- Any OS (Windows, macOS, Linux) + +### Live trading (optional) +- Windows +- MetaTrader 5 terminal +- Broker account supported by MT5 --- -## Install +## Environment setup -Create and activate a virtual environment, then install dependencies: +Create and activate a virtual environment: ```bash python -m venv .venv -# Windows: -.venv\\Scripts\\activate -# macOS/Linux: -source .venv/bin/activate -pip install -U pip -pip install -r requirements.txt +# Windows +.venv\Scripts\activate + +# macOS / Linux +source .venv/bin/activate ``` + +## Configuration + +A default backtest configuration is provided in: +The configuration is **predefined and ready to use** for the first run: +- includes a sample strategy +- defines symbols, timeframes and timerange +- points to default data and results directories + +No configuration changes are required to run the initial backtest. +The file serves both as a runnable default and as a reference +for further experimentation. ## Backtest run (minimal) -Run a small backtest first to validate the environment. -Use a small timerange and a single symbol to reduce debug surface. +Start with a small backtest to validate the environment and data pipeline. + +Using a limited timerange and a single symbol is recommended for the first run, +as historical data may need to be downloaded and cached. + ```bash python backtest_run.py -``` - +``` \ No newline at end of file diff --git a/docs/dev/performance.md b/docs/dev/performance.md new file mode 100644 index 0000000..f414c83 --- /dev/null +++ b/docs/dev/performance.md @@ -0,0 +1,183 @@ +# Performance & Profiling + +This document describes **observed performance characteristics**, profiling results +and optimization strategy of the framework. 
+ +Performance is treated as a **first-class architectural concern**, not as an afterthought. + +--- + +## Test scenario + +Representative backtest run used for profiling: + +- Symbols: 2 (EURUSD, XAUUSD) +- Base timeframe: M1 +- Informative timeframe: M30 +- Historical range: ~6 years +- OHLCV rows: + - ~520k bars per symbol (M1) + - ~18k bars per symbol (M30) +- Strategy: + - multi-timeframe + - feature-heavy (market structure, volatility, regimes) +- Execution: + - full cost model + - parallel backtest execution + +--- + +## End-to-end runtime breakdown + +Measured wall-clock times (single run): + +| Stage | Time | +|---------------------------------|----------| +| Data loading & caching | ~22s | +| Strategy execution (features + signals + plans) | ~25s | +| Backtest execution (parallel) | ~1.9s | +| Reporting & persistence | <1s | +| **Total runtime** | **~50s** | + +Timing is collected using structured instrumentation across pipeline stages. + +--- + +## Profiling methodology + +Profiling was performed using Python `cProfile` with cumulative time analysis: + +- Full pipeline profiling (not isolated microbenchmarks) +- Focus on **hot paths**, not function call counts +- Profiling reflects **real workload**, not synthetic data + +--- + +## Profiling summary (high level) + +Profiling confirms that execution time is dominated by **expected domains**: + +1. **Data access & preprocessing** + - Historical OHLCV loading + - Cache merging and validation + +2. **Feature engineering** + - Market structure analysis + - Dependency-aware feature computation + +3. **Vectorized pandas operations** + - Series access + - Column-wise transformations + +Orchestration, execution control flow and trade simulation introduce **negligible overhead**. 
+ +This validates the architectural separation between: +- deterministic computation +- orchestration +- execution side effects + +--- + +## Hot paths (qualitative) + +Key contributors to cumulative runtime: + +- Historical data provider (`fetch`, `_get_ohlcv`) +- Strategy `populate_indicators` +- FeatureEngineering pipeline +- MarketStructureEngine (pivots, price action, regimes) +- Pandas Series access and combination + +Notably **absent** from hot paths: + +- Strategy orchestration logic +- Execution engine control flow +- Trade repository operations +- Logging and instrumentation + +This indicates a **healthy performance profile**: +time is spent where domain complexity lives. + +--- + +## Feature engineering cost model + +Feature computation dominates strategy runtime by design. + +Key properties: + +- Deterministic execution order +- Explicit feature dependencies +- No implicit recomputation +- Reusable context across features + +Market structure analysis is treated as a **feature extraction problem**, +not as strategy logic, making it: + +- measurable +- optimizable +- replaceable + +--- + +## Pandas overhead + +Profiling shows measurable overhead in: + +- `Series.__getitem__` +- `Series.combine` +- Generic dataframe access + +This overhead is: +- explicit +- measurable +- localized to feature computation + +It does **not** leak into orchestration or execution layers. + +--- + +## Optimization strategy + +Optimization is intentionally **incremental and targeted**. + +Planned directions: + +1. **Numba acceleration** + - Only for confirmed hot paths + - Focus on pivot detection and structural calculations + - Preserve deterministic semantics + +2. **Data representation** + - Evaluate narrower arrays for selected features + - Avoid premature rewrites + +3. 
**Batch execution** + - Reuse intermediate feature context where possible + - Avoid recomputation across symbols and timeframes + +Because hot paths are explicitly identified, +optimization can be applied without architectural refactors. + +--- + +## Design takeaway + +Profiling confirms that: + +- Architecture scales as intended +- Performance cost is dominated by domain logic, not framework overhead +- Feature engineering is the correct optimization target +- Execution and orchestration layers remain lightweight + +This allows performance work to proceed **safely and predictably**, +without compromising correctness or determinism. + +--- + +## Notes + +Performance measurements are hardware-dependent and provided for +architectural insight rather than absolute benchmarking. + +All measurements were obtained from real end-to-end runs. \ No newline at end of file diff --git a/docs/dev/profiling.md b/docs/dev/profiling.md deleted file mode 100644 index fd343d9..0000000 --- a/docs/dev/profiling.md +++ /dev/null @@ -1,55 +0,0 @@ -# Profiling and performance workflow - -Performance is treated as an engineering concern: -measure first, optimize second. - ---- - -## Principles - -- Prefer vectorized pandas over row-wise loops -- Reduce dataframe copies and intermediate allocations -- Cache intermediate results via DAG context -- Use numba only for identified hot paths - ---- - -## What to measure - -- total pipeline runtime (end-to-end) -- feature module runtime distribution -- strategy runtime -- execution engine runtime -- peak memory usage (large datasets) - ---- - -## Practical workflow - -1) Run a small dataset to validate correctness. -2) Run a medium dataset to locate hotspots. -3) Profile the hottest functions/modules. -4) Optimize one hotspot at a time. -5) Re-run with the same input to confirm deterministic behavior. 
- ---- - -## Benchmark hygiene - -When measuring performance: -- use the same dataset and timerange -- disable background tasks -- run multiple times and compare variance -- track results in a simple log or markdown table - ---- - -## Stress tests - -Stress tests validate stability under load: -- intentionally high signal density -- large number of trades -- long timeranges - -The goal is not “pretty backtest results”. -The goal is stable performance and predictable execution behavior. \ No newline at end of file diff --git a/docs/index.html b/docs/index.html index 0078aef..cce2241 100644 --- a/docs/index.html +++ b/docs/index.html @@ -13,7 +13,7 @@
diff --git a/docs/readme_old.md b/docs/readme_old.md deleted file mode 100644 index f91050a..0000000 --- a/docs/readme_old.md +++ /dev/null @@ -1,478 +0,0 @@ -# Python Trading Research & Execution Framework - -A **modular, end-to-end quantitative research and execution framework** written in Python. - -## What This Project Showcases - -### Software Engineering -- clean, layered architecture -- modular and extensible design -- separation of research, execution, and infrastructure concerns -- deterministic and reproducible pipelines -- multiprocessing & performance optimization -- stateful systems with crash-safe persistence - -### Data Engineering -- vectorized time-series processing (`pandas`, `numpy`) -- multi-timeframe data handling -- bias-aware backtesting design -- realistic execution simulation -- statistical analysis and reporting -- visual diagnostics (charts & plots) (SAMPLE https://folg-code.github.io/pandas-quant-trading-framework/) - ---- - -## Technology Stack - -- **Python 3.10+** -- **pandas** – time-series data processing -- **numpy** – numerical computation -- **numba** – JIT-compiled performance-critical paths -- **matplotlib** – charting & visualization -- **rich** – structured console reporting -- **multiprocessing** – parallel backtests & strategy runs -- **MetaTrader5 API** – optional live execution backend - ---- - -## High-Level Capabilities - -This framework supports the **entire lifecycle** of a quantitative trading system: - -1. **Historical data ingestion** -2. **Multi-timeframe feature computation** -3. **Strategy research & signal generation** -4. **Backtesting with realistic execution logic** -5. **Statistical analysis & reporting** -6. **Visual diagnostics (charts)** -7. **Dry-run simulation** -8. **Live trading with persistent state** - -Each stage is implemented as a **decoupled module**, allowing independent testing and extension. - - - -## Core Modules - -### 1. 
Data Layer - -Responsible for **data acquisition and caching**: - -- historical OHLCV download -- pluggable backends (e.g. Dukascopy, MT5) -- unified interface for backtest & live data -- local cache for reproducibility and speed - -The rest of the system never depends on a concrete data source. - ---- - -### 2. Feature / Market Structure Engine - -A **deterministic, dependency-aware computation engine**: - -- batch-based processing (no DataFrame mutation) -- explicit feature dependencies -- vectorized time-series operations - -Used to generate reusable features for: -- research -- backtesting -- live trading - -This module demonstrates **complex data pipelines built on pandas**. - ---- - -### 3. Strategy Layer - -Strategies are implemented as **pure data transformations**: - -- consume DataFrames -- produce signals & execution plans -- no knowledge of execution or portfolio state - -Supports: -- multi-timeframe strategies -- reusability between backtest, dry-run, and live trading -- deterministic behavior - ---- - -### 4. Backtesting Engine - -Designed for **correctness and realism**, not shortcuts: - -- partial exits -- break-even logic -- slippage modeling -- trade de-duplication per signal tag -- multiprocessing across symbols - -Performance-critical exit simulation is implemented with **Numba JIT**. - ---- - -### 5. Reporting & Analytics - -The reporting layer provides a structured, extensible analytics framework for -evaluating trading strategies at multiple levels of granularity. 
- -Core capabilities include: - -- equity curve computation and visualization -- drawdown analysis (absolute, percentage, structure & failure modes) -- expectancy, profit factor, win rate and R-distribution -- per-symbol, per-entry-tag and per-exit-tag performance diagnostics -- conditional performance analysis across: - - time (hour of day, day of week) - - market regimes / strategy contexts -- contribution analysis: - - PnL contribution by signal, tag and regime - - drawdown contribution by signal and exit logic -- stability analysis across rolling and sliced backtest windows - -The reporting system is designed around **single-source-of-truth data structures**, -ensuring consistent results across all outputs. - -Reports are available as: -- rich console tables (stdout renderer) -- persisted data artifacts (JSON / structured Python objects) -- interactive HTML dashboards with charts and tables -- reusable data structures for further research, comparison and automation - -Planned extensions: -- side-by-side strategy comparison (A/B testing) -- comparison against benchmarks (buy & hold, index, synthetic baseline) -- equity curve overlays and metric-level diffs - ---- - -### 6. Visualization - -Charting utilities support: - -- price charts -- signal overlays -- trade entry / exit visualization -- diagnostic plots for research validation - -Plots are generated automatically during backtests. - ---- - -### 7. Dry-Run Mode - -A full **execution simulation without broker interaction**: - -- uses the same execution pipeline as live trading -- validates logic, risk management, and state transitions -- safe environment for end-to-end testing - ---- - -### 8. Live Trading Engine - -A production-style orchestration layer: - -- candle-based strategy execution -- tick-based position management -- risk-based position sizing -- partial closes and stop updates -- broker abstraction via adapter - -Live trading uses a **persistent state repository** to ensure restart safety. 
- ---- - -### 9. Persistence Layer - -State management is implemented via a lightweight repository: - -- JSON-based storage -- atomic writes -- crash-safe design -- single source of truth for live positions - -Demonstrates **robust state handling in long-running systems**. - ---- - -## Performance & Profiling (Reference) - -This project includes **end-to-end runtime profiling** to track performance -regressions and architectural changes over time. - -### Test Context -- Market: **EURUSD** -- Timeframes: **M5 + M30 informatives** -- Timerange : **2019:01:01 - 2025:12:31** -- Dataset size: - - M5: ~734,000 bars - - M30: ~123,000 bars -- Mode: single-symbol, single-process -- Strategy: HTS / structure-based strategy - -### Signal & Trade Volume (Intentional Stress Test) -- **Generated signals:** ~302,000 -- **Executed trades:** ~49,000 - - The signal and trade counts are **intentionally unrealistically high**. - This setup is used as a **performance stress test**, not as a realistic - trading configuration. - - A higher number of trades directly increases: - - execution simulation complexity - - state transitions - - partial exit calculations - - reporting and statistics computation - - This makes runtime measurements **more representative for worst-case - performance scenarios**. - -### Runtime Breakdown (Approximate) -- Data loading: ~4.3s -- Informative timeframes (M30): ~2.5s -- Feature computation (`populate_indicators`): ~4.6s -- Entry logic & signal generation: ~3.5s -- Backtest engine (execution simulation): ~1.8s -- Reporting & analytics: ~2.7s - -**Total runtime:** ~20 seconds - -Most of the runtime is spent on **feature computation and market structure logic**, -which is expected at the current development stage. -Backtest execution itself is **not a bottleneck**, even with a very large number -of generated signals and trades. - -This profiling snapshot serves as a **baseline reference**. 
-Performance numbers may change as features, execution layers, -or infrastructure evolve. - ---- - -## What This Repository Is (and Is Not) - -**This is:** -- a systems design project -- a data engineering & research framework -- a demonstration of Python engineering skills - -**This is not:** -- a commercial trading bot -- a black-box ML system -- a promise of trading profitability - ---- - -## Disclaimer - -This project is provided for **demonstration and educational purposes only**. -Any use in live markets is at the user's own risk. - ---- - -## Roadmap / TODO - -This roadmap defines the long-term development plan for the project. -The order is intentional and optimized for: -- fast feedback loops -- minimal overengineering -- controlled growth of system complexity -- clear separation between research, analytics, execution, and infrastructure - ---- - -## Phase 0 — Baseline Stabilization -**Goal:** Establish a fixed reference point for all further work. - -- Finalize TechnicalAnalysis refactor, focusing on architecture cleanup -- Freeze one strategy, one market, one timeframe -- Define a fixed historical dataset -- Create a baseline backtest result -- Treat this result as the reference benchmark - -_No new features, or research in this phase._ - ---- - -## Phase 1 — Analytics & Reporting Foundation -**Goal:** Build reusable analytical tools and frameworks that will later -enable systematic, repeatable, and extensible quantitative research. - - -- **Strategy comparison & benchmarking** - - side-by-side strategy comparison (A/B) on identical datasets - - metric-level diffs: - - expectancy - - win rate - - drawdown - - profit factor - - R distribution - - equity curve overlays and relative performance plots - - comparison against benchmarks: - - buy & hold - - index-based benchmark - - synthetic baseline (flat / random / risk-free) - ---- - -## Phase 2 — Feature Engineering (No ML) -**Goal:** Identify informative and stable features. 
- -- Extract contextual features from the core engine: - - market structure - - trend regime - - volatility regime - - distance to key levels - - time-based features (session, bar index) -- Analyze feature behavior: - - expectancy by quantiles - - stability across time - - redundancy and correlation -- Remove weak, unstable, or redundant features -- Build a final feature set (approximately 5–20 features) - -No machine learning is used in this phase. - ---- - -## Phase 3 — Machine Learning (Decision Trees) -**Goal:** Improve trade selection via probabilistic scoring. - -- Define ML targets (e.g. win/loss, R > threshold) -- Train tree-based models: - - Random Forest - - XGBoost / LightGBM -- Perform strict out-of-sample and walk-forward validation -- Analyze feature importance and decision boundaries -- Integrate ML as a scoring / filtering layer (not signal generation) -- Discard ML if it does not improve robustness or equity behavior - -ML is used to strengthen edge, not to create it. - ---- - -## Phase 4 — Market Research (Iterative) -**Goal:** Systematically explore and validate new market hypotheses. - -- Formulate explicit hypotheses about price behavior -- Translate hypotheses into measurable features -- Validate ideas using the analytics layer -- Discard ideas without measurable edge -- Iterate only through data-driven feedback - -Research is always subordinate to analytics. - ---- - -## Phase 5 — Execution Layer & Containerization -**Goal:** Production-ready execution independent of Windows. - -- Implement execution adapter (cTrader Open API) -- Add risk and rule guards -- Introduce Docker-based runtime for the engine -- Support live and paper trading modes -- Add execution health checks and fail-safe mechanisms - -Execution logic remains thin, deterministic, and broker-agnostic. - ---- - -## Phase 6 — Runtime Control & Telegram Notifications -**Goal:** Safe live operation and real-time supervision. 
- -### Strategy Runtime Control -- Enable / disable strategies at runtime -- Temporary pause / resume trading -- Scheduled trading blackout windows (e.g. high-impact news) -- Strategy reload without full system restart - -### Trade Operations -- Manual trade close (full or partial) -- Emergency close-all positions -- Per-strategy and global kill switch - -### Telegram Notification System -- Trade lifecycle notifications: - - trade opened - - trade closed - - execution errors -- Risk alerts: - - daily drawdown thresholds - - max drawdown proximity -- Strategy state changes: - - paused - - resumed - - disabled - - reloaded -- Connectivity and execution health alerts -- Configurable notification levels (info / warning / critical) - -Runtime control must be deterministic, auditable, and broker-agnostic. - ---- - -## Phase 6.5 — Backtest vs Live Consistency Validation -**Goal:** Detect execution drift and hidden performance degradation. - -- Align backtest, dry-run, and live trade datasets -- Compare key metrics: - - trade count - - winrate - - expectancy - - R distribution - - drawdown profile -- Detect missing or extra trades in live vs backtest -- Analyze timing and slippage differences -- Define acceptable deviation thresholds -- Trigger Telegram alerts on significant divergence -- Generate consistency comparison reports and plots - -This phase acts as an early warning system before scaling live trading. - ---- - -## Phase 7 — Django Control Plane -**Goal:** Operational usability and orchestration. - -- Django models: - - Strategy - - Backtest - - Trade - - Report - - RuntimeState -- Asynchronous backtest manager -- Dashboard for analytics, reports, and comparisons -- GUI for: - - starting / stopping strategies - - pausing / resuming trading - - manual trade management - - triggering strategy reloads -- compliance monitoring -- Centralized Telegram notification configuration - -Django acts as a control plane, not a computation engine. 
- ---- - -## Phase 8 — Advanced System Intelligence (Future) -**Goal:** Improve robustness, scalability, and understanding. - -- Market regime taxonomy beyond trend/range -- Multi-timeframe contextual intelligence -- Dynamic risk and capital allocation -- Explainability for ML-based decisions -- Strategy A/B testing and experimentation framework -- Strategy-as-configuration (DSL / YAML / JSON) -- Stress testing: - - latency - - slippage - - spread shocks -- Human-in-the-loop supervision workflows -- Knowledge graph linking features, setups, regimes, and outcomes - -These enhancements follow only after a stable core is achieved. - ---- - diff --git a/docs/roadmap.md b/docs/roadmap.md deleted file mode 100644 index bdfe8ed..0000000 --- a/docs/roadmap.md +++ /dev/null @@ -1,175 +0,0 @@ -# Roadmap - -This roadmap defines the planned evolution of the framework. -The order is intentional and optimized for: -- fast feedback loops -- architectural correctness -- controlled growth of system complexity -- clear separation between research, analytics, execution and infrastructure - -Completed documentation, architecture and reporting foundations are omitted. - ---- - -## Phase 1 — Test Coverage & Validation - -**Goal:** Establish confidence and prevent architectural regressions. - -- Add comprehensive test coverage: - - unit tests for feature nodes and pure computations - - contract tests for adapters and data providers - - integration tests for pipelines on small, deterministic fixtures -- Add invariant checks: - - no lookahead bias - - deterministic outputs for identical inputs - - schema and contract validation -- Define a minimal CI test suite focused on correctness (not performance) - -No new features or refactors in this phase. - ---- - -## Phase 2 — Technical Analysis Refactor Completion - -**Goal:** Finalize and stabilize the TechnicalAnalysis module architecture. 
- -- Complete refactor of the TechnicalAnalysis module: - - finalize module boundaries and responsibilities - - unify feature interfaces and outputs - - remove legacy or duplicated logic -- Align all feature modules with the DAG-based execution model -- Ensure full test coverage for the refactored architecture -- Freeze the TechnicalAnalysis API as a stable contract - -No new research or feature expansion in this phase. - ---- - -## Phase 3 — Baseline Stabilization - -**Goal:** Establish a fixed reference point for further development. - -- Freeze: - - one strategy - - one market - - one timeframe -- Define a fixed historical dataset -- Produce a baseline backtest result -- Treat this result as a long-term reference benchmark - -This baseline is used for all future comparisons. - ---- - -## Phase 4 — Analytics & Strategy Evaluation - -**Goal:** Enable systematic, repeatable strategy evaluation. - -- Strategy benchmarking on identical datasets -- Side-by-side comparison of strategy variants -- Metric-level comparisons (returns, drawdowns, expectancy) -- Equity curve overlays and relative performance analysis -- Comparison against simple baselines - -Focus on measurement quality, not feature expansion. - ---- - -## Phase 5 — Feature Engineering (Non-ML) - -**Goal:** Identify stable and informative features. - -- Expand and validate contextual feature sets: - - market structure - - regime and volatility context - - distance and timing-based features -- Analyze feature behavior: - - stability across time - - redundancy and correlation - - conditional expectancy -- Reduce feature set to a small, high-quality core - -No machine learning is introduced in this phase. - ---- - -## Phase 6 — Machine Learning as a Filtering Layer - -**Goal:** Improve trade selection robustness without altering signal logic. - -- Define explicit ML targets -- Train tree-based models (e.g. 
Random Forest, gradient boosting) -- Perform strict out-of-sample and walk-forward validation -- Integrate ML as a scoring or filtering layer only -- Discard ML if robustness does not improve - -ML strengthens decision quality; it does not create signals. - ---- - -## Phase 7 — Market Research Workflow - -**Goal:** Formalize hypothesis-driven market research. - -- Formulate explicit, testable hypotheses -- Translate hypotheses into measurable features -- Validate ideas using the analytics layer -- Systematically discard non-robust ideas - -Research remains subordinate to analytics and validation. - ---- - -## Phase 8 — Execution Layer Expansion & Portability - -**Goal:** Production-ready execution independent of platform constraints. - -- Add additional broker / execution adapters -- Improve execution safety and rule guards -- Introduce containerized runtime for the engine -- Support live and paper trading consistently -- Add execution health checks and fail-safe mechanisms - -Execution logic remains thin, deterministic and broker-agnostic. - ---- - -## Phase 9 — Runtime Control & Monitoring - -**Goal:** Safe live operation and supervision. - -- Runtime strategy control (enable/disable, pause/resume) -- Manual and emergency trade operations -- Global and per-strategy kill switches -- Event-based notification system -- Configurable alert levels and delivery channels - -All runtime control must be auditable and deterministic. - ---- - -## Phase 10 — Backtest vs Live Consistency Validation - -**Goal:** Detect execution drift and hidden performance degradation. - -- Align backtest, dry-run and live datasets -- Compare execution and performance metrics -- Detect missing or extra trades -- Analyze timing and slippage divergence -- Define acceptable deviation thresholds -- Trigger alerts on significant discrepancies - ---- - -## Phase 11 — Control Plane & Orchestration - -**Goal:** Operational usability and orchestration. 
- -- Centralized control plane for strategies and runs -- Management of backtests, reports and runtime state -- UI for operational control and analytics access -- Centralized configuration and notification management - -The control plane orchestrates the system; it does not perform computation. - ---- diff --git a/live_state/active_trades.json b/live_state/active_trades.json index 9e26dfe..7eafc84 100644 --- a/live_state/active_trades.json +++ b/live_state/active_trades.json @@ -1 +1,19 @@ -{} \ No newline at end of file +{ + "MOCK_BTCUSD": { + "trade_id": "MOCK_BTCUSD", + "symbol": "BTCUSD", + "direction": "long", + "entry_price": 69138.28, + "volume": 0.0, + "sl": 69138.28, + "tp1": 70122.64081208526, + "tp2": 71107.00162417052, + "tp1_executed": false, + "entry_time": "2026-02-06T17:47:00+00:00", + "entry_tag": "long_mask_rma_33", + "strategy": "Samplestrategyreport", + "strategy_config": {}, + "ticket": "MOCK_BTCUSD", + "be_moved": true + } +} \ No newline at end of file diff --git a/live_state/closed_trades.json b/live_state/closed_trades.json index c16f261..1240b26 100644 --- a/live_state/closed_trades.json +++ b/live_state/closed_trades.json @@ -98,5 +98,25 @@ "exit_time": "2026-02-05 19:35:00+00:00", "exit_reason": "BROKER_CLOSED", "exit_level_tag": "TP2_live" + }, + "MOCK_BTCUSD": { + "trade_id": "MOCK_BTCUSD", + "symbol": "BTCUSD", + "direction": "short", + "entry_price": 68754.98, + "volume": 0.0, + "sl": 68944.97525051115, + "tp1": 68564.98474948884, + "tp2": 68374.98949897768, + "tp1_executed": false, + "entry_time": "2026-02-06T17:43:00+00:00", + "entry_tag": "short_mask", + "strategy": "Samplestrategyreport", + "strategy_config": {}, + "ticket": "MOCK_BTCUSD", + "exit_price": 69137.2, + "exit_time": "2026-02-06 17:47:00+00:00", + "exit_reason": "SL", + "exit_level_tag": "SL_live" } } \ No newline at end of file diff --git a/live_trading_run.py b/live_trading_run.py index dfe1180..17da12b 100644 --- a/live_trading_run.py +++ b/live_trading_run.py @@ 
-10,7 +10,7 @@ def setup_logging(): os.makedirs("logs", exist_ok=True) formatter = logging.Formatter( - "%(asctime)s | %(levelname)s | %(name)s | %(message)s" + "%(asctime)s | %(levelname)s | %(message)s" ) file_handler = RotatingFileHandler( @@ -27,9 +27,11 @@ def setup_logging(): root = logging.getLogger() root.setLevel(logging.DEBUG) - root.handlers = [file_handler, console] + root.handlers.clear() + root.addHandler(file_handler) + root.addHandler(console) -setup_logging() if __name__ == "__main__": + setup_logging() LiveTradingRunner(cfg).run() \ No newline at end of file diff --git a/readme.md b/readme.md index 7007bc8..2cf4581 100644 --- a/readme.md +++ b/readme.md @@ -2,60 +2,188 @@ High-performance, deterministic Python framework for systematic trading. Unified pipeline: **data → features → strategy → execution → reporting**. + Designed for **research, backtesting and live trading using the same code path**. - Deterministic dataflow (no hidden state, no lookahead bias) -- Dependency-aware feature engines (DAG with cached context) -- Same pipeline for backtest, dry-run and live execution +- Explicit separation of data, features, strategy and execution +- Same strategy domain reused in backtest, dry-run and live trading +- Dependency-aware feature computation (DAG with cached context) - Stress-tested at high signal density (hundreds of thousands of signals) - Focus on engineering correctness and execution realism — not alpha marketing --- -## Backtest run pipeline +## Backtest run pipeline (high level) ```mermaid flowchart LR - A[Run]:::layer --> D[Data Layer]:::layer - D -->|OHLCV + cache| S[Strategy Engine]:::layer - S -->|signals + plans| E[Execution Engine]:::layer - E -->|trades| R[Risk & Reporting]:::layer + A[Run] --> D[Data Layer] + D -->|OHLCV + cache| S[Strategy Domain] + S -->|trade plans| E[Execution Engine] + E -->|trades| R[Risk & Reporting] +``` + +--- - classDef layer fill:#f5f5f5,stroke:#333,stroke-width:1px; +## Live run pipeline (high 
level) +```mermaid +flowchart LR + A[Run] --> M[Broker Connector] + M --> W[Warmup & Live Data] + W --> S[Strategy Domain] + S -->|trade plans| E[Live Execution Engine] + E --> B[Broker] ``` +### Key point: +Both backtest and live execution use the same Strategy Domain. +Differences are limited to orchestration, data sources and side effects. + --- -## Live run pipeline +## Shared Strategy Domain + +The strategy layer is implemented as a pure, deterministic domain shared between backtest and live execution. + +Both modes execute the same sequence: + + - multi-timeframe data alignment + - feature computation + - entry and exit signal generation + - trade plan construction + +Differences between backtest and live are strictly limited to: + + - historical vs broker data sources + - full-range vs candle-based orchestration + - simulated vs real execution side effects + +This ensures that live trading decisions are structurally identical to those evaluated during research. ```mermaid flowchart LR - A[Run]:::layer --> M[MT5 Connector]:::layer - M -->|login + symbol select
live / dry-run| W[Warmup Data]:::layer - W -->|OHLCV dataframe + informatives| S[Strategy Engine]:::layer - S -->|signals + plans| E[Live Execution Engine]:::layer - E -->|orders / positions| B[Broker / MT5]:::layer - classDef layer fill:#f5f5f5,stroke:#333,stroke-width:1px; + subgraph "Strategy Domain (shared)" + A[apply informatives] + B[populate indicators] + C[populate entry signals] + D[populate exit signals] + E[build trade plans] + + A --> B --> C --> D --> E + end + + Data --> A + E --> Execution +```
-- Feature computation organized as a dependency-aware DAG -- Explicit separation: computation context vs final dataframe outputs -- Vectorized pandas with optional numba hot paths -- No global mutable state, reproducible runs -- Same execution engine reused across backtest and live trading -- Explicit execution policies and structured cost model - (spread / slippage / financing) + - Explicit separation of concerns: + - data access + - feature computation + - strategy logic + - execution side effects + - Dependency-aware feature engines (DAG) + - Vectorized pandas with optional numba hot paths + - No global mutable state, reproducible runs + - Same execution semantics reused across backtest and live trading + - Explicit execution policies and structured cost model (spread / slippage / financing) --- -## Research +## Performance characteristics + +The framework is designed to handle **large historical datasets and high signal density** +without sacrificing determinism or architectural clarity. + +A representative backtest run (2 symbols, multi-timeframe, full feature pipeline): + +- ~540k OHLCV bars per symbol (M1 base timeframe) +- Multi-timeframe feature computation (M1 + M30) +- ~16k generated signals +- ~4k executed trades +- Full execution cost modeling and reporting + +Observed wall-clock performance on a typical workstation: + +- Data loading & caching: ~22s +- Strategy execution (features + signals + plans): ~25s (12.5s per symbol) +- Backtest execution (parallel): ~1.9s +- Reporting & persistence: <1s + +**Total end-to-end runtime:** ~53s + +The system has been stress-tested with intentionally higher signal density +(hundreds of thousands of signals) to validate execution scalability. 
+ +--- + +### Profiling insights + +Runtime profiling confirms that execution time is dominated by expected domains: + +- Data access and preprocessing (historical OHLCV loading and caching) +- Feature engineering, including dependency-aware market structure analysis +- Vectorized pandas operations inside feature computation + +Orchestration, execution control flow and trade simulation introduce negligible overhead. +This validates the architectural separation between: +- deterministic computation +- orchestration +- execution side effects + +FeatureEngineering hot paths are explicitly identified and measured, +making further optimization (e.g. numba acceleration or alternative representations) +a targeted, incremental process rather than a speculative refactor. + +--- +## Research Workflow Research workflow enforces execution realism by construction. @@ -85,26 +213,56 @@ Interactive reporting includes: 👉 **Sample dashboard:** https://folg-code.github.io/pandas-quant-trading-framework/ +--- + +## Documentation + +Additional documentation describing architectural details and design decisions: + +- **Architecture overview** + [`docs/architecture/pipelines.md`](docs/architecture/pipelines.md) + Detailed breakdown of backtest and live pipelines, shared strategy domain + and execution responsibilities. + +- **Feature engineering** + [`docs/architecture/feature_engine-dag.md`](docs/architecture/feature_engine-dag.md) + Design philosophy, dependency-aware feature DAG and extensibility model. + +- **Performance & profiling** + [`docs/dev/performance.md`](docs/dev/performance.md) + End-to-end runtime measurements, profiling results and optimization strategy. + +- **Reporting overview* + [`docs/reporting/overviw.md`](docs/reporting/overviw.md) + Overview for reports methodology and renders . 
--- ## Roadmap +Top priority: + +- Automated lookahead-bias detection + (systematic verification that strategy logic does not access future data, + across multi-timeframe features and execution paths) +While the architecture is designed to prevent lookahead by construction, +the goal is to make temporal correctness explicitly testable and provable. + +Recently completed: + +- Deterministic test suite covering: + - strategy domain (signals and trade plans) + - backtest and live execution + - data providers +- Architectural stabilization after major refactors + Short-term focus: -- Comprehensive test suite - (tests were intentionally deferred during heavy architectural refactors; - architecture is now stable and formal coverage becomes top priority) -- Finalize TechnicalAnalysis refactor (architecture cleanup) +- Finalize FeatureEngineering refactor - Feature research and development - Portfolio-level risk aggregation - Multi-strategy comparison dashboard -- Scenario stress-testing framework +- Scenario-based stress-testing framework - Execution simulator extensions ---- - -## Disclaimer - -This project is intended for **research and engineering purposes**. -It does not constitute financial advice and does not attempt to market trading alpha. +--- \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index fc9aa13..43feeb6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,4 +20,7 @@ MetaTrader5>=5.0 pytest>=7.4 -pyarrow \ No newline at end of file +pyarrow + +pytest +pytest-mock \ No newline at end of file