import sqlite3
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
from matplotlib.backends.backend_pdf import PdfPages
import warnings
import os

warnings.filterwarnings('ignore')

class EmotionalDamageStrategy:
    def __init__(self, initial_capital=100000):
        self.initial_capital = initial_capital
        self.cash = initial_capital
        self.positions = {}  # ticker -> shares held
        self.portfolio_value = []
        self.trades = []
        self.state = 'QQQ_HOLD'  # QQQ_HOLD, CASH_WAIT, VOLATILE_STOCKS
        self.fear_threshold = 25
        self.greed_threshold = 75
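
    # State machine implemented in run_backtest():
    #   QQQ_HOLD        -> sell QQQ and move to CASH_WAIT when the Fear & Greed
    #                      Index drops below fear_threshold (25)
    #   CASH_WAIT       -> when the index recovers to >= 25, buy the stocks that
    #                      were most volatile during the fear period and move to
    #                      VOLATILE_STOCKS
    #   VOLATILE_STOCKS -> when the index exceeds greed_threshold (75), sell the
    #                      volatile stocks and rotate back into QQQ (QQQ_HOLD)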

    def get_data(self):
        """Load the Fear & Greed Index, QQQ prices, and the ticker universe"""
        script_dir = os.path.dirname(os.path.abspath(__file__))
        backtest_dir = os.path.dirname(os.path.dirname(script_dir))
        db_path = os.path.join(backtest_dir, 'data', 'stock_data.db')
        conn = sqlite3.connect(db_path)

        # Get Fear & Greed Index
        fg_data = pd.read_sql_query('''
            SELECT date, fear_greed_index
            FROM fear_greed_index
            ORDER BY date
        ''', conn)
        fg_data['date'] = pd.to_datetime(fg_data['date'])
        fg_data.set_index('date', inplace=True)

        # Get real QQQ price data
        qqq_data = pd.read_sql_query('''
            SELECT date, close as qqq_close
            FROM qqq
            ORDER BY date
        ''', conn)
        qqq_data['date'] = pd.to_datetime(qqq_data['date'])
        qqq_data.set_index('date', inplace=True)

        # Get available tickers for high volatility selection
        cursor = conn.cursor()
        cursor.execute('SELECT ticker FROM ticker_list WHERE records > 1000')
        self.available_tickers = [row[0] for row in cursor.fetchall()]

        conn.close()

        # Merge data
        self.data = pd.merge(fg_data, qqq_data, left_index=True, right_index=True, how='inner')
        self.data.sort_index(inplace=True)

        print(f"Loaded data from {self.data.index.min().strftime('%Y-%m-%d')} to {self.data.index.max().strftime('%Y-%m-%d')}")
        print(f"Available tickers for high volatility selection: {len(self.available_tickers)}")

    def get_stock_price(self, ticker, date):
        """Get the stock price for a specific ticker and date"""
        script_dir = os.path.dirname(os.path.abspath(__file__))
        backtest_dir = os.path.dirname(os.path.dirname(script_dir))
        db_path = os.path.join(backtest_dir, 'data', 'stock_data.db')
        conn = sqlite3.connect(db_path)

        query = f'''
            SELECT close FROM {ticker.lower()}
            WHERE date <= ?
            ORDER BY date DESC
            LIMIT 1
        '''

        cursor = conn.cursor()
        cursor.execute(query, (date.strftime('%Y-%m-%d'),))
        result = cursor.fetchone()
        conn.close()

        return result[0] if result else None
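
    # Note on the lookup above: each ticker has its own table in stock_data.db,
    # named with the lowercased ticker symbol, and the query returns the most
    # recent close on or before the requested date (an "as-of" price), or None
    # if the table has no rows up to that date.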

    def calculate_volatility(self, tickers, start_date, end_date):
        """Calculate historical volatility for tickers during a fear period"""
        volatilities = {}
        script_dir = os.path.dirname(os.path.abspath(__file__))
        backtest_dir = os.path.dirname(os.path.dirname(script_dir))
        db_path = os.path.join(backtest_dir, 'data', 'stock_data.db')
        conn = sqlite3.connect(db_path)

        for ticker in tickers:
            try:
                query = f'''
                    SELECT date, close FROM {ticker.lower()}
                    WHERE date >= ? AND date <= ?
                    ORDER BY date
                '''

                df = pd.read_sql_query(query, conn, params=(
                    start_date.strftime('%Y-%m-%d'),
                    end_date.strftime('%Y-%m-%d')
                ))

                if len(df) > 10:  # Ensure sufficient data
                    df['returns'] = df['close'].pct_change()
                    volatility = df['returns'].std() * np.sqrt(252)  # Annualized volatility
                    volatilities[ticker] = volatility

            except Exception:
                continue

        conn.close()
        return volatilities
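
    # The volatility above is the standard deviation of daily close-to-close
    # returns scaled to an annual figure: sigma_annual = std(daily returns) * sqrt(252),
    # with 252 the usual count of trading days per year. For example, a daily
    # return std of 0.03 maps to roughly 0.03 * 15.87 ~= 0.48 (48% annualized).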

    def select_top_volatile_stocks(self, fear_start_date, fear_end_date, top_n=10):
        """Select the top N most volatile stocks during the fear period"""
        volatilities = self.calculate_volatility(self.available_tickers, fear_start_date, fear_end_date)

        # Sort by volatility and select top N
        sorted_vol = sorted(volatilities.items(), key=lambda x: x[1], reverse=True)
        top_stocks = [ticker for ticker, vol in sorted_vol[:top_n]]

        print(f"Top {top_n} volatile stocks during fear period {fear_start_date.strftime('%Y-%m-%d')} to {fear_end_date.strftime('%Y-%m-%d')}: {top_stocks}")

        return top_stocks

    def execute_trade(self, date, action, ticker=None, amount=None):
        """Execute a trade and record it"""
        if action == 'BUY_QQQ':
            # Buy QQQ with all available cash
            price = self.data.loc[date, 'qqq_close']  # Real QQQ close price
            shares = self.cash / price
            self.positions['QQQ'] = shares
            self.cash = 0
            self.trades.append({
                'date': date,
                'action': 'BUY_QQQ',
                'ticker': 'QQQ',
                'shares': shares,
                'price': price,
                'value': shares * price
            })

        elif action == 'SELL_QQQ':
            # Sell all QQQ
            if 'QQQ' in self.positions:
                shares = self.positions['QQQ']
                price = self.data.loc[date, 'qqq_close']
                self.cash = shares * price
                del self.positions['QQQ']
                self.trades.append({
                    'date': date,
                    'action': 'SELL_QQQ',
                    'ticker': 'QQQ',
                    'shares': shares,
                    'price': price,
                    'value': shares * price
                })

        elif action == 'BUY_VOLATILE':
            # Buy one volatile stock with the allocated amount
            if ticker and amount:
                price = self.get_stock_price(ticker, date)
                if price:
                    shares = amount / price
                    self.positions[ticker] = shares
                    self.cash -= amount  # Deduct the spent amount; if no price is found the allocation stays in cash
                    self.trades.append({
                        'date': date,
                        'action': 'BUY_VOLATILE',
                        'ticker': ticker,
                        'shares': shares,
                        'price': price,
                        'value': amount
                    })

        elif action == 'SELL_ALL_VOLATILE':
            # Sell all volatile stock positions
            total_value = 0
            for ticker in list(self.positions.keys()):
                if ticker != 'QQQ':
                    shares = self.positions[ticker]
                    price = self.get_stock_price(ticker, date)
                    if price:
                        value = shares * price
                        total_value += value
                        self.trades.append({
                            'date': date,
                            'action': 'SELL_VOLATILE',
                            'ticker': ticker,
                            'shares': shares,
                            'price': price,
                            'value': value
                        })
                    # The position is removed even if no price was found
                    del self.positions[ticker]
            self.cash += total_value
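
    # Every branch above appends a trade record with the same fields:
    # 'date', 'action', 'ticker', 'shares', 'price' and 'value'. The reporting
    # code below (trades_df) relies on these fields for its per-year and
    # per-action breakdowns.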

    def calculate_portfolio_value(self, date):
        """Calculate total portfolio value at the given date"""
        total_value = self.cash

        for ticker, shares in self.positions.items():
            if ticker == 'QQQ':
                price = self.data.loc[date, 'qqq_close']
            else:
                price = self.get_stock_price(ticker, date)

            if price:
                total_value += shares * price

        return total_value

    def run_backtest(self):
        """Run the emotional damage strategy backtest"""
        print("Running Emotional Damage Strategy Backtest...")

        self.get_data()

        # Start with QQQ
        first_date = self.data.index[0]
        self.execute_trade(first_date, 'BUY_QQQ')
        self.state = 'QQQ_HOLD'

        fear_start_date = None

        for date, row in self.data.iterrows():
            fg_index = row['fear_greed_index']

            if self.state == 'QQQ_HOLD':
                # Check if Fear & Greed drops below 25
                if fg_index < self.fear_threshold:
                    self.execute_trade(date, 'SELL_QQQ')
                    self.state = 'CASH_WAIT'
                    fear_start_date = date
                    print(f"{date.strftime('%Y-%m-%d')}: Fear & Greed {fg_index:.1f} < 25, selling QQQ, holding cash")

            elif self.state == 'CASH_WAIT':
                # Check if Fear & Greed recovers to 25 or above
                if fg_index >= self.fear_threshold and fear_start_date:
                    # Select the most volatile stocks during the fear period
                    fear_end_date = date
                    top_volatile = self.select_top_volatile_stocks(fear_start_date, fear_end_date)

                    # Buy the top volatile stocks with equal weight; execute_trade
                    # deducts each filled allocation from cash
                    if top_volatile:
                        amount_per_stock = self.cash / len(top_volatile)
                        for ticker in top_volatile:
                            self.execute_trade(date, 'BUY_VOLATILE', ticker, amount_per_stock)
                        self.state = 'VOLATILE_STOCKS'
                        print(f"{date.strftime('%Y-%m-%d')}: Fear & Greed recovered to {fg_index:.1f}, buying volatile stocks: {top_volatile}")

            elif self.state == 'VOLATILE_STOCKS':
                # Check if Fear & Greed exceeds 75 (extreme greed)
                if fg_index > self.greed_threshold:
                    self.execute_trade(date, 'SELL_ALL_VOLATILE')
                    self.execute_trade(date, 'BUY_QQQ')
                    self.state = 'QQQ_HOLD'
                    print(f"{date.strftime('%Y-%m-%d')}: Fear & Greed {fg_index:.1f} > 75, selling volatile stocks, buying QQQ")

            # Record portfolio value
            portfolio_value = self.calculate_portfolio_value(date)
            self.portfolio_value.append({
                'date': date,
                'value': portfolio_value,
                'state': self.state,
                'fg_index': fg_index
            })

        print(f"Backtest completed! Total trades: {len(self.trades)}")

    def calculate_performance_metrics(self, returns):
        """Calculate performance metrics from a series of portfolio values"""
        total_return = (returns.iloc[-1] / returns.iloc[0] - 1) * 100
        annual_return = ((returns.iloc[-1] / returns.iloc[0]) ** (252 / len(returns)) - 1) * 100

        # Calculate max drawdown
        peak = returns.expanding().max()
        drawdown = (returns - peak) / peak
        max_drawdown = drawdown.min() * 100

        # Find the date of the maximum drawdown
        max_dd_date = drawdown.idxmin()

        # Calculate Sharpe ratio (no risk-free rate is subtracted)
        daily_returns = returns.pct_change().dropna()
        sharpe_ratio = np.sqrt(252) * daily_returns.mean() / daily_returns.std()

        # Annual returns by calendar year
        annual_rets = {}
        for year in returns.index.year.unique():
            year_data = returns[returns.index.year == year]
            if len(year_data) > 1:
                year_return = (year_data.iloc[-1] / year_data.iloc[0] - 1) * 100
                annual_rets[year] = year_return

        return {
            'total_return': total_return,
            'annual_return': annual_return,
            'max_drawdown': max_drawdown,
            'max_drawdown_date': max_dd_date,
            'sharpe_ratio': sharpe_ratio,
            'annual_returns': annual_rets
        }
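
# Illustrative sketch (not part of the original strategy, and never called by the
# backtest): shows how calculate_performance_metrics() can be exercised on a
# synthetic equity curve. The dates and returns below are made up for
# demonstration only.
def _example_metrics_on_synthetic_curve():
    dates = pd.date_range('2020-01-01', periods=504, freq='B')  # roughly two years of business days
    rng = np.random.default_rng(0)
    daily_returns = rng.normal(0.0004, 0.01, len(dates))  # hypothetical drift and volatility
    equity = pd.Series(100000 * np.cumprod(1 + daily_returns), index=dates)
    return EmotionalDamageStrategy().calculate_performance_metrics(equity)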

def run_emotional_damage_backtest():
    """Run the emotional damage strategy and generate results"""

    # Run strategy
    strategy = EmotionalDamageStrategy(initial_capital=100000)
    strategy.run_backtest()

    # Convert results to DataFrame
    portfolio_df = pd.DataFrame(strategy.portfolio_value)
    portfolio_df.set_index('date', inplace=True)

    # Get benchmark data (QQQ and SPY)
    script_dir = os.path.dirname(os.path.abspath(__file__))
    backtest_dir = os.path.dirname(os.path.dirname(script_dir))
    db_path = os.path.join(backtest_dir, 'data', 'stock_data.db')
    conn = sqlite3.connect(db_path)

    # Get QQQ data
    qqq_data = pd.read_sql_query('''
        SELECT date, close as qqq_close
        FROM qqq
        ORDER BY date
    ''', conn)
    qqq_data['date'] = pd.to_datetime(qqq_data['date'])
    qqq_data.set_index('date', inplace=True)

    # Get SPY data
    spy_data = pd.read_sql_query('''
        SELECT date, spy_close
        FROM fear_greed_data
        ORDER BY date
    ''', conn)
    spy_data['date'] = pd.to_datetime(spy_data['date'])
    spy_data.set_index('date', inplace=True)

    conn.close()

    # Merge benchmark data
    benchmark_data = pd.merge(qqq_data, spy_data, left_index=True, right_index=True, how='inner')

    # Align dates
    common_dates = portfolio_df.index.intersection(benchmark_data.index)
    portfolio_df = portfolio_df.loc[common_dates]
    benchmark_data = benchmark_data.loc[common_dates]

    # Normalize to the same starting value for comparison
    start_value = 100000

    # Build QQQ and SPY buy-and-hold benchmarks
    benchmark_data['qqq_value'] = start_value * (benchmark_data['qqq_close'] / benchmark_data['qqq_close'].iloc[0])
    benchmark_data['spy_value'] = start_value * (benchmark_data['spy_close'] / benchmark_data['spy_close'].iloc[0])

    # Calculate performance metrics
    strategy_metrics = strategy.calculate_performance_metrics(portfolio_df['value'])
    qqq_metrics = strategy.calculate_performance_metrics(benchmark_data['qqq_value'])
    spy_metrics = strategy.calculate_performance_metrics(benchmark_data['spy_value'])

    return {
        'strategy': strategy,
        'portfolio_df': portfolio_df,
        'benchmark_data': benchmark_data,
        'strategy_metrics': strategy_metrics,
        'qqq_metrics': qqq_metrics,
        'spy_metrics': spy_metrics
    }
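
# A minimal way to consume the dictionary returned above, for example:
#     results = run_emotional_damage_backtest()
#     results['strategy_metrics']['total_return']   # strategy total return in %
#     results['benchmark_data']['qqq_value']        # QQQ buy-and-hold equity curve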

def generate_original_pdf_report(results, reports_dir="/home/will/docker/backtest/reports"):
    """Generate a PDF report for the original emotional damage strategy"""
    strategy = results['strategy']
    portfolio_df = results['portfolio_df']
    benchmark_data = results['benchmark_data']
    strategy_metrics = results['strategy_metrics']
    qqq_metrics = results['qqq_metrics']
    spy_metrics = results['spy_metrics']

    # Create output directory
    os.makedirs(reports_dir, exist_ok=True)
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')

    # Prepare trades data
    trades_df = pd.DataFrame(strategy.trades)

    # Find the year of the maximum drawdown
    def find_max_drawdown_year(returns):
        peak = returns.expanding().max()
        drawdown = (returns - peak) / peak
        max_dd_date = drawdown.idxmin()
        return max_dd_date.year

    strategy_dd_year = find_max_drawdown_year(portfolio_df['value'])
    qqq_dd_year = find_max_drawdown_year(benchmark_data['qqq_value'])
    spy_dd_year = find_max_drawdown_year(benchmark_data['spy_value'])

    # Create PDF
    pdf_file = os.path.join(reports_dir, f'original_emotional_damage_strategy_report_{timestamp}.pdf')

    with PdfPages(pdf_file) as pdf:
        # Set global font parameters
        plt.rcParams['font.size'] = 10
        plt.rcParams['axes.titlesize'] = 12
        plt.rcParams['axes.labelsize'] = 10
        plt.rcParams['xtick.labelsize'] = 8
        plt.rcParams['ytick.labelsize'] = 8
        plt.rcParams['legend.fontsize'] = 8
        plt.rcParams['figure.titlesize'] = 14

        # Page 1: Performance Comparison
        fig1 = plt.figure(figsize=(8.5, 11))
        fig1.suptitle('Original Emotional Damage Strategy Report', fontsize=16, fontweight='bold', y=0.96)

        # 1. Total Return Curve
        ax1 = plt.subplot(4, 1, 1)
        ax1.plot(portfolio_df.index, portfolio_df['value'] / 1000,
                 label='Original Strategy', linewidth=2, color='red')
        ax1.plot(benchmark_data.index, benchmark_data['qqq_value'] / 1000,
                 label='QQQ', linewidth=2, color='blue')
        ax1.plot(benchmark_data.index, benchmark_data['spy_value'] / 1000,
                 label='SPY', linewidth=2, color='green')
        ax1.set_title('Portfolio Performance Comparison', fontsize=14, fontweight='bold', pad=25)
        ax1.set_ylabel('Portfolio Value ($K)', fontsize=11)
        ax1.legend(fontsize=10, loc='upper left')
        ax1.grid(True, alpha=0.3)
        ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
        for label in ax1.get_xticklabels():
            label.set_rotation(45)

        # 2. Performance Metrics Table
        ax2 = plt.subplot(4, 1, 2)
        ax2.axis('off')

        metrics_data = [
            ['Metric', 'Original Strategy', 'QQQ', 'SPY'],
            ['Total Return', f"{strategy_metrics['total_return']:.1f}%",
             f"{qqq_metrics['total_return']:.1f}%", f"{spy_metrics['total_return']:.1f}%"],
            ['Annual Return', f"{strategy_metrics['annual_return']:.1f}%",
             f"{qqq_metrics['annual_return']:.1f}%", f"{spy_metrics['annual_return']:.1f}%"],
            ['Max Drawdown', f"{strategy_metrics['max_drawdown']:.1f}%",
             f"{qqq_metrics['max_drawdown']:.1f}%", f"{spy_metrics['max_drawdown']:.1f}%"],
            ['Max DD Year', str(strategy_dd_year), str(qqq_dd_year), str(spy_dd_year)],
            ['Sharpe Ratio', f"{strategy_metrics['sharpe_ratio']:.2f}",
             f"{qqq_metrics['sharpe_ratio']:.2f}", f"{spy_metrics['sharpe_ratio']:.2f}"],
            ['Total Trades', f"{len(strategy.trades)}", 'N/A', 'N/A']
        ]

        table = ax2.table(cellText=metrics_data, cellLoc='center', loc='center')
        table.auto_set_font_size(False)
        table.set_fontsize(10)
        table.scale(1.2, 2.0)

        for i in range(len(metrics_data[0])):
            table[(0, i)].set_facecolor('#40466e')
            table[(0, i)].set_text_props(weight='bold', color='white')

        ax2.set_title('Performance Metrics Comparison', fontsize=14, fontweight='bold', pad=25)

        # 3. Strategy State Timeline
        ax3 = plt.subplot(4, 1, 3)

        state_colors = {
            'QQQ_HOLD': 'blue',
            'CASH_WAIT': 'gray',
            'VOLATILE_STOCKS': 'red'
        }

        for state, color in state_colors.items():
            state_data = portfolio_df[portfolio_df['state'] == state]
            if not state_data.empty:
                ax3.scatter(state_data.index, state_data['value'] / 1000,
                            c=color, s=2, alpha=0.8, label=state)

        ax3.set_title('Strategy State Timeline', fontsize=14, fontweight='bold', pad=25)
        ax3.set_ylabel('Total Assets ($K)', fontsize=11)
        ax3.legend(bbox_to_anchor=(1.02, 1), loc='upper left', fontsize=8)
        ax3.grid(True, alpha=0.3)
        ax3.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
        for label in ax3.get_xticklabels():
            label.set_rotation(45)

        # 4. Annual Returns Comparison
        ax4 = plt.subplot(4, 1, 4)
        years = list(strategy_metrics['annual_returns'].keys())
        original_returns = list(strategy_metrics['annual_returns'].values())
        qqq_returns = [qqq_metrics['annual_returns'].get(year, 0) for year in years]
        spy_returns = [spy_metrics['annual_returns'].get(year, 0) for year in years]

        x = np.arange(len(years))
        width = 0.25

        ax4.bar(x - width, original_returns, width, label='Original Strategy', color='red', alpha=0.8)
        ax4.bar(x, qqq_returns, width, label='QQQ', color='blue', alpha=0.8)
        ax4.bar(x + width, spy_returns, width, label='SPY', color='green', alpha=0.8)

        ax4.set_title('Annual Returns Comparison by Year', fontsize=14, fontweight='bold', pad=25)
        ax4.set_ylabel('Annual Return (%)', fontsize=11)
        ax4.set_xlabel('Year', fontsize=11)
        ax4.set_xticks(x)
        ax4.set_xticklabels(years)
        for label in ax4.get_xticklabels():
            label.set_rotation(90)
        ax4.legend(fontsize=10)
        ax4.grid(True, alpha=0.3, axis='y')

        plt.subplots_adjust(left=0.1, right=0.85, top=0.90, bottom=0.08, hspace=0.6)
        pdf.savefig(fig1, bbox_inches='tight', dpi=150)
        plt.close()

        # Page 2: Detailed Analysis
        fig2 = plt.figure(figsize=(8.5, 11))
        fig2.suptitle('Detailed Trading and Market Analysis', fontsize=16, fontweight='bold', y=0.95)

        # 5. Trading Activity by Year
        ax5 = plt.subplot(3, 1, 1)

        trades_df_copy = trades_df.copy()
        trades_df_copy['year'] = trades_df_copy['date'].dt.year
        trade_frequency = trades_df_copy.groupby('year').size()

        ax5.bar(trade_frequency.index, trade_frequency.values, color='purple', alpha=0.8, width=0.6)
        ax5.set_title('Trading Activity by Year', fontsize=14, fontweight='bold', pad=30)
        ax5.set_ylabel('Number of Trades', fontsize=11)
        ax5.set_xlabel('Year', fontsize=11)
        ax5.grid(True, alpha=0.3, axis='y')
        for label in ax5.get_xticklabels():
            label.set_rotation(45)

        # 6. Trade Type Distribution
        ax6 = plt.subplot(3, 1, 2)

        action_counts = trades_df['action'].value_counts()
        ax6.pie(action_counts.values, labels=action_counts.index, autopct='%1.1f%%', startangle=90)
        ax6.set_title('Trade Type Distribution', fontsize=14, fontweight='bold', pad=30)

        # 7. Fear & Greed Index with Trading Signals
        ax7 = plt.subplot(3, 1, 3)

        fg_data = portfolio_df['fg_index'].dropna()
        ax7.plot(fg_data.index, fg_data.values, color='purple', alpha=0.8, linewidth=1.5)
        ax7.axhline(y=25, color='red', linestyle='--', alpha=0.7, linewidth=2, label='Fear Threshold (25)')
        ax7.axhline(y=75, color='green', linestyle='--', alpha=0.7, linewidth=2, label='Greed Threshold (75)')
        ax7.fill_between(fg_data.index, 0, 25, alpha=0.2, color='red', label='Fear Zone')
        ax7.fill_between(fg_data.index, 75, 100, alpha=0.2, color='green', label='Greed Zone')

        # Add trade markers
        buy_trades = trades_df[trades_df['action'].str.contains('BUY')]
        sell_trades = trades_df[trades_df['action'].str.contains('SELL')]

        if not buy_trades.empty:
            ax7.scatter(buy_trades['date'], [50] * len(buy_trades),
                        color='darkgreen', s=15, alpha=0.8, marker='^', label='Buy Signals', zorder=5)
        if not sell_trades.empty:
            ax7.scatter(sell_trades['date'], [50] * len(sell_trades),
                        color='darkred', s=15, alpha=0.8, marker='v', label='Sell Signals', zorder=5)

        ax7.set_title('Fear & Greed Index with Trading Signals', fontsize=14, fontweight='bold', pad=30)
        ax7.set_ylabel('CNN Fear & Greed Index', fontsize=11)
        ax7.set_xlabel('Date', fontsize=11)
        ax7.set_ylim(0, 100)
        ax7.legend(bbox_to_anchor=(1.02, 1), loc='upper left', fontsize=8)
        ax7.grid(True, alpha=0.3)
        ax7.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
        for label in ax7.get_xticklabels():
            label.set_rotation(45)

        plt.subplots_adjust(left=0.1, right=0.85, top=0.88, bottom=0.10, hspace=1.0)
        pdf.savefig(fig2, bbox_inches='tight', dpi=150)
        plt.close()

    # Validate the generated PDF
    try:
        file_size = os.path.getsize(pdf_file)
        print(f"📈 PDF report saved: {pdf_file} (Size: {file_size:,} bytes)")

        with open(pdf_file, 'rb') as f:
            header = f.read(10)
            if header.startswith(b'%PDF'):
                print("✅ PDF file validation passed")
            else:
                print("⚠️ Warning: Generated file may not be a valid PDF")

    except Exception as e:
        print(f"⚠️ Error validating PDF: {e}")

    return pdf_file

if __name__ == "__main__":
    results = run_emotional_damage_backtest()
    print("Backtest completed! Generating PDF report...")

    # Generate PDF report
    pdf_file = generate_original_pdf_report(results)
    print(f"🎉 Original Emotional Damage Strategy report generated: {pdf_file}")