diff --git a/adwireguard/docker-compose.yml b/adwireguard/docker-compose.yml
index 7969308..a3364c0 100644
--- a/adwireguard/docker-compose.yml
+++ b/adwireguard/docker-compose.yml
@@ -14,8 +14,8 @@ services:
- SERVERURL=will123song.xyz # domain taken from the caddy config
- SERVERPORT=51820
- PEERS=5 # supports 5 clients
- - PEERDNS=10.99.99.1 # use AdGuard as DNS
- - INTERNAL_SUBNET=10.99.99.0
+ - PEERDNS=1.1.1.1,8.8.8.8 # use Cloudflare and Google DNS
+ - INTERNAL_SUBNET=10.88.88.0
- ALLOWEDIPS=0.0.0.0/0
- LOG_CONFS=true
volumes:
@@ -26,51 +26,6 @@ services:
sysctls:
- net.ipv4.conf.all.src_valid_mark=1
restart: unless-stopped
- networks:
- adwireguard_network:
- ipv4_address: 10.99.99.2
-
- # AdGuard Home DNS Server (integrated with WireGuard)
- adguard:
- image: adguard/adguardhome:latest
- container_name: adguard_wg
- restart: unless-stopped
- ports:
- - "3002:3000/tcp" # Web UI (different port to avoid conflict)
- volumes:
- - ./adguard_work:/opt/adguardhome/work
- - ./adguard_conf:/opt/adguardhome/conf
- networks:
- adwireguard_network:
- ipv4_address: 10.99.99.1
- depends_on:
- - wireguard
-
- # Web management UI proxy (optional)
- nginx:
- image: nginx:alpine
- container_name: adwireguard_web
- restart: unless-stopped
- ports:
- - "8080:80"
- volumes:
- - ./nginx.conf:/etc/nginx/nginx.conf:ro
- - ./web:/usr/share/nginx/html:ro
- networks:
- - adwireguard_network
- depends_on:
- - adguard
- - wireguard
volumes:
- wireguard_config:
- adguard_work:
- adguard_conf:
-
-networks:
- adwireguard_network:
- driver: bridge
- ipam:
- config:
- - subnet: 10.99.99.0/24
- gateway: 10.99.99.1
\ No newline at end of file
+ wireguard_config:
\ No newline at end of file
diff --git a/adwireguard/wireguard_config/.donoteditthisfile b/adwireguard/wireguard_config/.donoteditthisfile
index fe5d389..8b4724f 100644
--- a/adwireguard/wireguard_config/.donoteditthisfile
+++ b/adwireguard/wireguard_config/.donoteditthisfile
@@ -1,7 +1,7 @@
ORIG_SERVERURL="will123song.xyz"
ORIG_SERVERPORT="51820"
-ORIG_PEERDNS="10.99.99.1"
+ORIG_PEERDNS="1.1.1.1,8.8.8.8"
ORIG_PEERS="5"
-ORIG_INTERFACE="10.99.99"
+ORIG_INTERFACE="10.88.88"
ORIG_ALLOWEDIPS="0.0.0.0/0"
ORIG_PERSISTENTKEEPALIVE_PEERS=""
diff --git a/adwireguard/wireguard_config/peer1/peer1.conf b/adwireguard/wireguard_config/peer1/peer1.conf
index 2aa5e73..1986e24 100644
--- a/adwireguard/wireguard_config/peer1/peer1.conf
+++ b/adwireguard/wireguard_config/peer1/peer1.conf
@@ -1,8 +1,8 @@
[Interface]
-Address = 10.99.99.2
+Address = 10.88.88.2
PrivateKey = eMKDKj+T/bqWPrvhRqPx8en6Vq1BUwDuT/Hhss871HQ=
ListenPort = 51820
-DNS = 10.99.99.1
+DNS = 1.1.1.1,8.8.8.8
[Peer]
PublicKey = WBqIC7XpVtjreZt5GF/BLo7DpXqZrbu9gv74pons2gA=
diff --git a/adwireguard/wireguard_config/peer1/peer1.png b/adwireguard/wireguard_config/peer1/peer1.png
index e2211fb..bc9dd84 100644
Binary files a/adwireguard/wireguard_config/peer1/peer1.png and b/adwireguard/wireguard_config/peer1/peer1.png differ
diff --git a/adwireguard/wireguard_config/peer2/peer2.conf b/adwireguard/wireguard_config/peer2/peer2.conf
index 4295ebf..08148f8 100644
--- a/adwireguard/wireguard_config/peer2/peer2.conf
+++ b/adwireguard/wireguard_config/peer2/peer2.conf
@@ -1,8 +1,8 @@
[Interface]
-Address = 10.99.99.3
+Address = 10.88.88.3
PrivateKey = 0LFpFeHETDTVbU8vy3uq2SGNWrBH5gJYU5LKhkHWCUc=
ListenPort = 51820
-DNS = 10.99.99.1
+DNS = 1.1.1.1,8.8.8.8
[Peer]
PublicKey = WBqIC7XpVtjreZt5GF/BLo7DpXqZrbu9gv74pons2gA=
diff --git a/adwireguard/wireguard_config/peer2/peer2.png b/adwireguard/wireguard_config/peer2/peer2.png
index 8313fbe..94e638b 100644
Binary files a/adwireguard/wireguard_config/peer2/peer2.png and b/adwireguard/wireguard_config/peer2/peer2.png differ
diff --git a/adwireguard/wireguard_config/peer3/peer3.conf b/adwireguard/wireguard_config/peer3/peer3.conf
index eb792a9..1d92ad0 100644
--- a/adwireguard/wireguard_config/peer3/peer3.conf
+++ b/adwireguard/wireguard_config/peer3/peer3.conf
@@ -1,8 +1,8 @@
[Interface]
-Address = 10.99.99.4
+Address = 10.88.88.4
PrivateKey = KElddZxSWLQXsr8vgWU7CxqFnHuf3s5lS0bj9hqoT3o=
ListenPort = 51820
-DNS = 10.99.99.1
+DNS = 1.1.1.1,8.8.8.8
[Peer]
PublicKey = WBqIC7XpVtjreZt5GF/BLo7DpXqZrbu9gv74pons2gA=
diff --git a/adwireguard/wireguard_config/peer3/peer3.png b/adwireguard/wireguard_config/peer3/peer3.png
index cc2f7de..8bd2a76 100644
Binary files a/adwireguard/wireguard_config/peer3/peer3.png and b/adwireguard/wireguard_config/peer3/peer3.png differ
diff --git a/adwireguard/wireguard_config/peer4/peer4.conf b/adwireguard/wireguard_config/peer4/peer4.conf
index e2fb9dc..9266fd5 100644
--- a/adwireguard/wireguard_config/peer4/peer4.conf
+++ b/adwireguard/wireguard_config/peer4/peer4.conf
@@ -1,8 +1,8 @@
[Interface]
-Address = 10.99.99.5
+Address = 10.88.88.5
PrivateKey = WA2IbKsg2rtbYCwJlZjox6nj3beGJEGu2p4qHFWHImQ=
ListenPort = 51820
-DNS = 10.99.99.1
+DNS = 1.1.1.1,8.8.8.8
[Peer]
PublicKey = WBqIC7XpVtjreZt5GF/BLo7DpXqZrbu9gv74pons2gA=
diff --git a/adwireguard/wireguard_config/peer4/peer4.png b/adwireguard/wireguard_config/peer4/peer4.png
index 977c525..9961d9e 100644
Binary files a/adwireguard/wireguard_config/peer4/peer4.png and b/adwireguard/wireguard_config/peer4/peer4.png differ
diff --git a/adwireguard/wireguard_config/peer5/peer5.conf b/adwireguard/wireguard_config/peer5/peer5.conf
index 5a729d9..87fd2d9 100644
--- a/adwireguard/wireguard_config/peer5/peer5.conf
+++ b/adwireguard/wireguard_config/peer5/peer5.conf
@@ -1,8 +1,8 @@
[Interface]
-Address = 10.99.99.6
+Address = 10.88.88.6
PrivateKey = eBoR6Lm6Llz8IefrHu94C9Qvc5rfUKxPaVPLmPZgiVc=
ListenPort = 51820
-DNS = 10.99.99.1
+DNS = 1.1.1.1,8.8.8.8
[Peer]
PublicKey = WBqIC7XpVtjreZt5GF/BLo7DpXqZrbu9gv74pons2gA=
diff --git a/adwireguard/wireguard_config/peer5/peer5.png b/adwireguard/wireguard_config/peer5/peer5.png
index dcef4f2..de1961c 100644
Binary files a/adwireguard/wireguard_config/peer5/peer5.png and b/adwireguard/wireguard_config/peer5/peer5.png differ
diff --git a/adwireguard/wireguard_config/wg_confs/wg0.conf b/adwireguard/wireguard_config/wg_confs/wg0.conf
index 8777197..e54b12a 100644
--- a/adwireguard/wireguard_config/wg_confs/wg0.conf
+++ b/adwireguard/wireguard_config/wg_confs/wg0.conf
@@ -1,5 +1,5 @@
[Interface]
-Address = 10.99.99.1
+Address = 10.88.88.1
ListenPort = 51820
PrivateKey = YNXol4rvLMXngA0pkxXUkkxANO+GMYArW7usbbpkvV4=
PostUp = iptables -A FORWARD -i %i -j ACCEPT; iptables -A FORWARD -o %i -j ACCEPT; iptables -t nat -A POSTROUTING -o eth+ -j MASQUERADE
@@ -9,29 +9,29 @@ PostDown = iptables -D FORWARD -i %i -j ACCEPT; iptables -D FORWARD -o %i -j ACC
# peer1
PublicKey = fdn1mU/iG6avnWBzy0dKmAxgIFLGWO77KS8nIE15fnE=
PresharedKey = mP7oNgTh4FAzKRj+Ib53SC7uMO7fhxtEdTPwOuZCwD8=
-AllowedIPs = 10.99.99.2/32
+AllowedIPs = 10.88.88.2/32
[Peer]
# peer2
PublicKey = gtav3MLgVRukd/HJ0FFOFRJ7bhx/cU3WwfTQGu79GBY=
PresharedKey = Wbwh8gE6k63LOjuXAy1sFN6VqoGcgEvq62WONHutD6I=
-AllowedIPs = 10.99.99.3/32
+AllowedIPs = 10.88.88.3/32
[Peer]
# peer3
PublicKey = 9g0Q3umKlWhiL3dTmIiMSHXeCc9YXtg5vBTcOar7Bh8=
PresharedKey = NJotC0A6bO2o8mkpSWvZ1OSdi90jKkVFMSYMakzM+F0=
-AllowedIPs = 10.99.99.4/32
+AllowedIPs = 10.88.88.4/32
[Peer]
# peer4
PublicKey = eZEv8DzOwraaUyoeU31fDUQrKzHaPC8EGEWp6JH9xiI=
PresharedKey = RtXAOz29/zNgr6xcdeyRE3wzzu94QuwYc7AiWBh2Wqw=
-AllowedIPs = 10.99.99.5/32
+AllowedIPs = 10.88.88.5/32
[Peer]
# peer5
PublicKey = S2rwMN2aOoC1vsyyMjA3STT3AsYFHAOuglxMn03Ut0U=
PresharedKey = irzJtDsZhpL7+y9gyUFb7JVNfWQMTwiK3HVEQR7RBYk=
-AllowedIPs = 10.99.99.6/32
+AllowedIPs = 10.88.88.6/32
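
The INTERNAL_SUBNET change re-renders the server and all five peer configs onto 10.88.88.0/24, as the hunks above show. A sanity check after recreating the stack (a sketch, assuming the container is named `wireguard` and this is run from the adwireguard directory):

```bash
docker compose up -d --force-recreate        # re-render configs with the new env
docker exec wireguard wg show wg0            # AllowedIPs should list 10.88.88.2-6
grep -r "10.99.99" wireguard_config/ || echo "old subnet fully gone"
```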
diff --git a/discord_bot/ai_bots/docker-compose.yml b/discord_bot/ai_bots/docker-compose.yml
index aed0918..2b6aec3 100644
--- a/discord_bot/ai_bots/docker-compose.yml
+++ b/discord_bot/ai_bots/docker-compose.yml
@@ -15,8 +15,8 @@ services:
- PYTHONUNBUFFERED=1
networks:
- ai-bots-network
- depends_on:
- - deepseek-bot
+ # depends_on:
+ # - deepseek-bot
healthcheck:
test: ["CMD", "python3", "-c", "import sys; sys.exit(0)"]
interval: 30s
@@ -45,28 +45,28 @@ services:
retries: 3
start_period: 30s
- # Claude Bot - Claude AI chat bot
- claude-bot:
- build:
- context: ./ai_bot_3
- dockerfile: Dockerfile
- container_name: claude-discord-bot
- restart: unless-stopped
- volumes:
- - ../../tokens.txt:/home/will/docker/tokens.txt:ro
- - ../utils:/app/utils:ro
- environment:
- - PYTHONUNBUFFERED=1
- networks:
- - ai-bots-network
- depends_on:
- - deepseek-bot
- healthcheck:
- test: ["CMD", "python3", "-c", "import sys; sys.exit(0)"]
- interval: 30s
- timeout: 10s
- retries: 3
- start_period: 30s
+ # Claude Bot - Claude AI chat bot (disabled - token issue)
+ # claude-bot:
+ # build:
+ # context: ./ai_bot_3
+ # dockerfile: Dockerfile
+ # container_name: claude-discord-bot
+ # restart: unless-stopped
+ # volumes:
+ # - ../../tokens.txt:/home/will/docker/tokens.txt:ro
+ # - ../utils:/app/utils:ro
+ # environment:
+ # - PYTHONUNBUFFERED=1
+ # networks:
+ # - ai-bots-network
+ # depends_on:
+ # - deepseek-bot
+ # healthcheck:
+ # test: ["CMD", "python3", "-c", "import sys; sys.exit(0)"]
+ # interval: 30s
+ # timeout: 10s
+ # retries: 3
+ # start_period: 30s
networks:
ai-bots-network:
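
To apply this on the host, `--remove-orphans` also cleans up the claude-discord-bot container that the file no longer defines (assuming the compose project lives at ~/docker/discord_bot/ai_bots):

```bash
cd ~/docker/discord_bot/ai_bots
docker compose up -d --remove-orphans
```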
diff --git a/docker-services-shutdown.service b/docker-services-shutdown.service
new file mode 100644
index 0000000..fc74caf
--- /dev/null
+++ b/docker-services-shutdown.service
@@ -0,0 +1,17 @@
+[Unit]
+Description=Gracefully shutdown Docker services before system halt/reboot
+DefaultDependencies=false
+Before=shutdown.target reboot.target halt.target
+Requires=docker.service
+
+[Service]
+Type=oneshot
+RemainAfterExit=true
+ExecStart=/bin/true
+ExecStop=/home/will/docker/shutdown_docker_services.sh
+TimeoutStopSec=300
+User=will
+Group=will
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
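
This unit uses the standard oneshot + RemainAfterExit pattern: ExecStart=/bin/true marks the unit active at boot, so systemd runs ExecStop during shutdown, ordered Before=shutdown.target. The referenced shutdown_docker_services.sh is not included in this diff; a minimal sketch of what it might look like, assuming one compose project per directory under /home/will/docker:

```bash
#!/bin/bash
# Hypothetical sketch of shutdown_docker_services.sh (not part of this diff)
LOG=/var/log/docker-shutdown.log
echo "$(date) stopping Docker services" >> "$LOG"
for dir in /home/will/docker/*/; do
    # Bring down every directory that holds a compose project
    if [ -f "${dir}docker-compose.yml" ]; then
        (cd "$dir" && docker compose down) >> "$LOG" 2>&1
    fi
done
```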
diff --git a/docker-services-startup.service b/docker-services-startup.service
new file mode 100644
index 0000000..fb10efe
--- /dev/null
+++ b/docker-services-startup.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Start Docker services on system boot
+After=docker.service network-online.target
+Wants=network-online.target
+Requires=docker.service
+
+[Service]
+Type=oneshot
+ExecStart=/home/will/docker/startup_docker_services.sh
+TimeoutStartSec=600
+User=will
+Group=will
+RemainAfterExit=yes
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
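
Once installed, the boot-time behavior can be checked after a reboot (standard systemd tooling, nothing specific to this repo):

```bash
journalctl -u docker-services-startup.service -b --no-pager
systemd-analyze critical-chain docker-services-startup.service
```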
diff --git a/install_services.sh b/install_services.sh
new file mode 100755
index 0000000..2cd138a
--- /dev/null
+++ b/install_services.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+# Install the Docker service management scripts and scheduled tasks
+
+echo "Installing Docker service management scripts..."
+
+# Make the scripts executable
+chmod +x /home/will/docker/shutdown_docker_services.sh
+chmod +x /home/will/docker/startup_docker_services.sh
+
+# Create the log directory and log files
+sudo mkdir -p /var/log
+sudo touch /var/log/docker-shutdown.log
+sudo touch /var/log/docker-startup.log
+sudo chown will:will /var/log/docker-*.log
+
+# Copy the systemd unit files into the system directory
+sudo cp /home/will/docker/docker-services-shutdown.service /etc/systemd/system/
+sudo cp /home/will/docker/docker-services-startup.service /etc/systemd/system/
+
+# Reload the systemd configuration
+sudo systemctl daemon-reload
+
+# Enable the services
+sudo systemctl enable docker-services-shutdown.service
+sudo systemctl enable docker-services-startup.service
+
+echo "已安装 systemd 服务"
+
+# Configure a cron job to reboot daily at 1 AM
+CRON_JOB="0 1 * * * /sbin/reboot"
+
+# Check whether the job already exists
+if ! crontab -l 2>/dev/null | grep -q "/sbin/reboot"; then
+    # Add the new cron job
+    (crontab -l 2>/dev/null; echo "$CRON_JOB") | crontab -
+    echo "Added the daily 1 AM reboot cron job"
+else
+    echo "Scheduled reboot job already exists"
+fi
+
+echo "安装完成!"
+echo ""
+echo "服务状态:"
+sudo systemctl status docker-services-shutdown.service --no-pager
+sudo systemctl status docker-services-startup.service --no-pager
+echo ""
+echo "当前 crontab:"
+crontab -l
+echo ""
+echo "日志文件位置:"
+echo "- 关机日志: /var/log/docker-shutdown.log"
+echo "- 启动日志: /var/log/docker-startup.log"
\ No newline at end of file
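
A usage sketch for the host; the sudo steps will prompt for a password. One caveat: `/sbin/reboot` in a regular user's crontab normally fails for lack of privileges, so if the nightly reboot never fires, the job likely belongs in root's crontab (`sudo crontab -e`) instead.

```bash
cd ~/docker
./install_services.sh
# Confirm the units are registered
systemctl list-unit-files | grep docker-services
```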
diff --git a/router/.gitignore b/router/.gitignore
deleted file mode 100644
index bbc72e3..0000000
--- a/router/.gitignore
+++ /dev/null
@@ -1,32 +0,0 @@
-# Ignore all data directories
-*/data/
-*/postgres/
-*/vw-data/
-*/*.db
-*/*.sqlite
-*/logs/
-*/db_data/
-*/caddy_data/
-*/caddy_config/
-*/config/
-
-# Ignore environment files
-.env
-.env.local
-
-# Ignore temporary files
-*.tmp
-*.log
-
-# Ignore HA specific directories
-HA/config/
-HA/db_data/
-
-# Router specific ignores
-router/__pycache__/
-router/venv/
-router/*.pyc
-router/*.log
-
-# Keep structure
-!.gitkeep
\ No newline at end of file
diff --git a/router/Dockerfile b/router/Dockerfile
deleted file mode 100644
index a679451..0000000
--- a/router/Dockerfile
+++ /dev/null
@@ -1,32 +0,0 @@
-FROM python:3.11-slim
-
-# Set working directory
-WORKDIR /app
-
-# Install system dependencies
-RUN apt-get update && apt-get install -y \
- curl \
- && rm -rf /var/lib/apt/lists/*
-
-# Copy requirements first for better caching
-COPY requirements.txt .
-
-# Install Python dependencies
-RUN pip install --no-cache-dir -r requirements.txt
-
-# Copy application code
-COPY . .
-
-# Create non-root user
-RUN useradd -m -u 1000 router && chown -R router:router /app
-USER router
-
-# Expose port
-EXPOSE 8000
-
-# Health check
-HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \
- CMD curl -f http://localhost:8000/health || exit 1
-
-# Run the application
-CMD ["python", "app.py"]
\ No newline at end of file
diff --git a/router/HA/2025-07-13-this-site-cant-provide-a-secure-connection.txt b/router/HA/2025-07-13-this-site-cant-provide-a-secure-connection.txt
deleted file mode 100644
index cb68f12..0000000
--- a/router/HA/2025-07-13-this-site-cant-provide-a-secure-connection.txt
+++ /dev/null
@@ -1,722 +0,0 @@
-╭───────────────────────────────────────────────────╮
-│ ✻ Welcome to Claude Code! │
-│ │
-│ /help for help, /status for your current setup │
-│ │
-│ cwd: /home/will/docker/HA │
-╰───────────────────────────────────────────────────╯
-
- Tips for getting started:
-
- 1. Run /init to create a CLAUDE.md file with instructions for Claude
- 2. Use Claude to help with file analysis, editing, bash commands and git
- 3. Be as specific as you would with another engineer for the best results
-
-> This site can’t provide a secure connection
- ha.will123song.xyz sent an invalid response.
- ERR_SSL_PROTOCOL_ERROR
-
-● I can see you're getting an SSL protocol error when trying to connect to ha.will123song.xyz. This typically
- indicates an SSL/TLS configuration issue.
-
-● Update Todos
- ⎿ ☐ Diagnose SSL configuration for ha.will123song.xyz
- ☐ Check Home Assistant SSL/TLS setup
- ☐ Verify certificate configuration
-
-● Let me check your Home Assistant configuration to diagnose the SSL issue:
-
-● List(.)
- ⎿ Listed 26 paths (ctrl+r to expand)
-
-● Bash(openssl s_client -connect ha.will123song.xyz:443 -servername ha.will123song.xyz)
- ⎿ Error: 40E7B0D8597F0000:error:0A000438:SSL routines:ssl3_read_bytes:tlsv1 alert internal
- error:../ssl/record/rec_layer_s3.c:1605:SSL alert number 80
-
- CONNECTED(00000003)
- ---
- no peer certificate available
- ---
- No client certificate CA names sent
- ---
- SSL handshake has read 7 bytes and written 324 bytes
- Verification: OK
- … +9 lines (ctrl+r to see all)
-
-● Read(docker-compose.yml)
- ⎿ Read 35 lines (ctrl+r to expand)
-
-● Read(config/configuration.yaml)
- ⎿ Read 12 lines (ctrl+r to expand)
-
-● The issue is that your Home Assistant is running on HTTP port 8123 without SSL/TLS configured, but you're trying
- to access it via HTTPS. You need to either:
-
- 1. Access via HTTP: http://ha.will123song.xyz:8123
- 2. Configure SSL in Home Assistant
-
-● Update Todos
- ⎿ ☒ Diagnose SSL configuration for ha.will123song.xyz
- ☒ Check Home Assistant SSL/TLS setup
- ☒ Verify certificate configuration
-
-● Solution: Your Home Assistant is running on HTTP port 8123 without SSL. Try accessing
- http://ha.will123song.xyz:8123 instead.
-
- To enable HTTPS, add this to your configuration.yaml:
- http:
- ssl_certificate: /path/to/cert.pem
- ssl_key: /path/to/privkey.pem
-
-> update whatever needs to be updated, to give me access to this docker at https://ha.will123song.xyz
-
-● Update Todos
- ⎿ ☐ Configure SSL/TLS for Home Assistant
- ☐ Set up reverse proxy or SSL certificates
- ☐ Update configuration to enable HTTPS access
-
-● I'll configure HTTPS access for your Home Assistant. The best approach is to add a reverse proxy with SSL
- certificates. Let me set this up:
-
-● Update(docker-compose.yml)
- ⎿ (No changes)
-
-> All SSL certificates should be handled by caddy
-
-● Update Todos
- ⎿ ☐ Configure Caddy reverse proxy for Home Assistant
- ☐ Set up automatic SSL certificates with Caddy
- ☐ Update docker-compose to use Caddy
-
-● I'll configure Caddy as the reverse proxy with automatic SSL certificates. Let me update your docker-compose:
-
-● Update(docker-compose.yml)
- ⎿ Updated docker-compose.yml with 20 additions and 3 removals
- 10 - ./config:/config
- 11 - /etc/localtime:/etc/localtime:ro
- 12 - /run/dbus:/run/dbus:ro
- 13 - ports:
- 14 - - "8123:8123"
- 13 + expose:
- 14 + - "8123"
- 15 environment:
- 16 - TZ=Asia/Shanghai
- 17 - network_mode: host
- 17 + networks:
- 18 + - ha-network
- 19 depends_on:
- 20 - homeassistant-db
- 21
- 22 + caddy:
- 23 + container_name: ha-caddy
- 24 + image: caddy:alpine
- 25 + restart: unless-stopped
- 26 + ports:
- 27 + - "80:80"
- 28 + - "443:443"
- 29 + volumes:
- 30 + - ./caddy/Caddyfile:/etc/caddy/Caddyfile
- 31 + - caddy_data:/data
- 32 + - caddy_config:/config
- 33 + networks:
- 34 + - ha-network
- 35 + depends_on:
- 36 + - homeassistant
- 37 +
- 38 homeassistant-db:
- 39 container_name: homeassistant-db
- 40 image: postgres:15
-
-● Update(docker-compose.yml)
- ⎿ Updated docker-compose.yml with 7 additions and 1 removal
- 49 - "5432:5432"
- 50
- 51 volumes:
- 52 - db_data:
- 52 \ No newline at end of file
- 53 + db_data:
- 54 + caddy_data:
- 55 + caddy_config:
- 56 +
- 57 + networks:
- 58 + ha-network:
- 59 + driver: bridge
- 60 \ No newline at end of file
-
-● Update Todos
- ⎿ ☒ Configure Caddy reverse proxy for Home Assistant
- ☒ Update docker-compose to use Caddy
- ☐ Set up automatic SSL certificates with Caddy
-
-● Bash(mkdir -p /home/will/docker/HA/caddy)
- ⎿ No (tell Claude what to do differently)
-
-> access ~/docker/caddy to manipulate caddy
-
-● List(../caddy)
- ⎿ Listed 10 paths (ctrl+r to expand)
-
-● Read(../caddy/Caddyfile)
- ⎿ Read 42 lines (ctrl+r to expand)
-
-● Perfect! I can see that Caddy is already configured with Home Assistant at ha.will123song.xyz on line 30-32. Now
- I need to update your Home Assistant docker-compose to use the existing Caddy network:
-
-● Read(../caddy/docker-compose.yml)
- ⎿ Read 20 lines (ctrl+r to expand)
-
-● Update(docker-compose.yml)
- ⎿ Updated docker-compose.yml with 1 addition and 17 removals
- 15 environment:
- 16 - TZ=Asia/Shanghai
- 17 networks:
- 18 - - ha-network
- 18 + - caddy-network
- 19 depends_on:
- 20 - homeassistant-db
- 21
- 22 - caddy:
- 23 - container_name: ha-caddy
- 24 - image: caddy:alpine
- 25 - restart: unless-stopped
- 26 - ports:
- 27 - - "80:80"
- 28 - - "443:443"
- 29 - volumes:
- 30 - - ./caddy/Caddyfile:/etc/caddy/Caddyfile
- 31 - - caddy_data:/data
- 32 - - caddy_config:/config
- 33 - networks:
- 34 - - ha-network
- 35 - depends_on:
- 36 - - homeassistant
- 37 -
- 22 homeassistant-db:
- 23 container_name: homeassistant-db
- 24 image: postgres:15
-
-● Update(docker-compose.yml)
- ⎿ Updated docker-compose.yml with 4 additions and 2 removals
- 29 POSTGRES_PASSWORD: homeassistant_password
- 30 volumes:
- 31 - ./db_data:/var/lib/postgresql/data
- 32 - ports:
- 33 - - "5432:5432"
- 32 + expose:
- 33 + - "5432"
- 34 + networks:
- 35 + - caddy-network
- 36
- 37 volumes:
- 38 db_data:
-
-● Update(docker-compose.yml)
- ⎿ Updated docker-compose.yml with 2 additions and 4 removals
- 36
- 37 volumes:
- 38 db_data:
- 39 - caddy_data:
- 40 - caddy_config:
- 39
- 40 networks:
- 41 - ha-network:
- 42 - driver: bridge
- 41 \ No newline at end of file
- 42 + caddy-network:
- 43 + external: true
- 44 \ No newline at end of file
-
-● Update Todos
- ⎿ ☒ Configure Caddy reverse proxy for Home Assistant
- ☒ Set up automatic SSL certificates with Caddy
- ☒ Update docker-compose to use Caddy
-
-● Configuration complete! Your Home Assistant is now configured to work with your existing Caddy setup at
- https://ha.will123song.xyz.
-
- To activate the changes, restart your containers:
-
- cd /home/will/docker/HA && docker-compose down && docker-compose up -d
-
- The Caddy configuration already includes the Home Assistant proxy on line 30-32 of your Caddyfile, so SSL
- certificates will be handled automatically.
-
-> will@debian ~/docker/HA 21:21 docker compose down && docker compose up -d
- WARN[0000] /home/will/docker/HA/docker-compose.yml: the attribute `version` is obsolete, it will be ignored,
- please remove it to avoid potential confusion
- [+] Running 2/2
- ✔ Container homeassistant Removed
- 4.3s
- ✔ Container homeassistant-db Removed
- 0.3s
- WARN[0000] /home/will/docker/HA/docker-compose.yml: the attribute `version` is obsolete, it will be ignored,
- please remove it to avoid potential confusion
- network caddy-network declared as external, but could not be found
- will@debian ~/docker/HA 21:21
-
-● The Caddy network needs to be created first. Let me fix this:
-
-● Bash(cd /home/will/docker/caddy && docker-compose up -d)
- ⎿ Error: /bin/bash: line 1: docker-compose: command not found
-
-● Bash(cd /home/will/docker/caddy && docker compose up -d)
- ⎿ time="2025-07-13T21:22:05-05:00" level=warning msg="/home/will/docker/caddy/docker-compose.yml: the attribute `
- version` is obsolete, it will be ignored, please remove it to avoid potential confusion"
- Container caddy Running
-
-
-● Bash(cd /home/will/docker/HA && docker compose up -d)
- ⎿ Error: time="2025-07-13T21:22:08-05:00" level=warning msg="/home/will/docker/HA/docker-compose.yml: the
- attribute `version` is obsolete, it will be ignored, please remove it to avoid potential confusion"
- network caddy-network declared as external, but could not be found
-
-● Let me check the actual network name created by Caddy:
-
-● Bash(docker network ls)
- ⎿ NETWORK ID NAME DRIVER SCOPE
- 0276fe9854e6 bridge bridge local
- 44f94bf99a43 caddy_caddy-network bridge local
- … +4 lines (ctrl+r to expand)
-
-● The network is named caddy_caddy-network. Let me fix the configuration:
-
-● Update(docker-compose.yml)
- ⎿ Updated docker-compose.yml with 2 additions and 1 removal
- 39
- 40 networks:
- 41 caddy-network:
- 42 - external: true
- 42 \ No newline at end of file
- 43 + external: true
- 44 + name: caddy_caddy-network
- 45 \ No newline at end of file
-
-● Bash(cd /home/will/docker/HA && docker compose up -d)
- ⎿ time="2025-07-13T21:22:23-05:00" level=warning msg="/home/will/docker/HA/docker-compose.yml: the attribute `ver
- sion` is obsolete, it will be ignored, please remove it to avoid potential confusion"
- Container homeassistant-db Creating
- … +7 lines (ctrl+r to expand)
-
-
-● Perfect! Your Home Assistant is now running and connected to the Caddy network. You should now be able to access
- it at https://ha.will123song.xyz with automatic SSL certificates provided by Caddy.
-
-> This site can’t provide a secure connection
- ha.will123song.xyz sent an invalid response.
- ERR_SSL_PROTOCOL_ERROR
-
-● Let me check if there's a conflict with the existing Caddy configuration and troubleshoot the SSL issue:
-
-● Bash(docker logs caddy --tail 20)
- ⎿ {
- "level": "error",
- "ts": 1752459273.2643094,
- … +896 lines (ctrl+r to expand)
-
-● Bash(docker logs homeassistant --tail 10)
- ⎿ s6-rc: info: service s6rc-oneshot-runner: starting
- s6-rc: info: service s6rc-oneshot-runner successfully started
- s6-rc: info: service fix-attrs: starting
- … +6 lines (ctrl+r to expand)
-
-● The issue is DNS resolution - Let's check if ha.will123song.xyz domain exists and test the connection:
-
-● Bash(nslookup ha.will123song.xyz)
- ⎿ Server: 100.100.100.100
- Address: 100.100.100.100#53
-
- … +3 lines (ctrl+r to expand)
-
-● Bash(curl -I http://ha.will123song.xyz)
- ⎿ HTTP/1.1 308 Permanent Redirect
- Connection: close
- Location: https://ha.will123song.xyz/
- … +6 lines (ctrl+r to expand)
-
-● The domain resolves correctly. The issue is that Caddy is failing to get SSL certificates because it can't reach
- the homeassistant container. Let me check if the containers can communicate:
-
-● Bash(docker exec caddy nslookup homeassistant)
- ⎿ Error: Server: 127.0.0.11
- Address: 127.0.0.11:53
-
- ** server can't find homeassistant.fossa-dinosaur.ts.net: NXDOMAIN
-
- ** server can't find homeassistant.fossa-dinosaur.ts.net: NXDOMAIN
-
-● Bash(docker exec caddy wget -qO- http://homeassistant:8123)
-  ⎿  Home Assistant
-
-> homeassistant.fossa-dinosaur.ts.net should also be reversed to ha. fossa-dinosaur.ts.net is my tailscale
-
-● I'll add the Tailscale domain to your Caddy configuration so you can access Home Assistant via both the public
- domain and your Tailscale network:
-
-● Update(../caddy/Caddyfile)
- ⎿ Updated ../caddy/Caddyfile with 1 addition and 1 removal
- 27 reverse_proxy sillytavern:8000
- 28 }
- 29
- 30 - ha.will123song.xyz {
- 30 + ha.will123song.xyz, homeassistant.fossa-dinosaur.ts.net {
- 31 reverse_proxy homeassistant:8123
- 32 }
- 33
-
-● Bash(docker exec caddy caddy reload --config /etc/caddy/Caddyfile)
- ⎿ {
- "level": "info",
- "ts": 1752459878.5068831,
- … +17 lines (ctrl+r to expand)
-
-● Done! Your Home Assistant is now accessible via both:
- - https://ha.will123song.xyz (public)
- - https://homeassistant.fossa-dinosaur.ts.net (Tailscale)
-
- Caddy will automatically handle SSL certificates for both domains.
-
-> 400: Bad Request fromhttps://ha.will123song.xyz/
-
-● The 400 Bad Request error is likely because Home Assistant doesn't trust the domain. Let me configure Home
- Assistant to allow access from your domains:
-
-● Read(config/configuration.yaml)
- ⎿ Read 12 lines (ctrl+r to expand)
-
-● Update(config/configuration.yaml)
- ⎿ Error: EACCES: permission denied, open '/home/will/docker/HA/config/configuration.yaml'
-
-● Bash(sudo chown will:will /home/will/docker/HA/config/configuration.yaml)
- ⎿ Error: sudo: a terminal is required to read the password; either use the -S option to read from standard input
- or configure an askpass helper
- sudo: a password is required
-
-● Bash(ls -la /home/will/docker/HA/config/configuration.yaml)
- ⎿ -rw-r--r-- 1 root root 265 Jul 13 21:12 /home/will/docker/HA/config/configuration.yaml
-
-● The file is owned by root. Let me try a different approach - I'll recreate the file with the correct content:
-
-● Bash(docker exec homeassistant cp /config/configuration.yaml /config/configuration.yaml.backup)
- ⎿ (No content)
-
-● Bash(docker exec homeassistant sh -c 'cat > /config/configuration.yaml << EOF…)
- ⎿ (No content)
-
-● Bash(docker restart homeassistant)
- ⎿ homeassistant
-
-● Bash(sleep 15 && curl -I https://ha.will123song.xyz)
- ⎿ HTTP/2 400
- alt-svc: h3=":443"; ma=2592000
- content-type: text/plain; charset=utf-8
- … +8 lines (ctrl+r to expand)
-
-● Let me check the Home Assistant logs to see what's causing the 400 error:
-
-● Bash(docker logs homeassistant --tail 20)
- ⎿ 2025-07-14 10:25:56.483 ERROR (MainThread) [homeassistant.setup] Unable to set up dependencies of 'system_
- health'. Setup failed for dependencies: ['http']
- 2025-07-14 10:25:56.483 ERROR (MainThread) [homeassistant.setup] Setup failed for 'system_health': Could n
- … +33 lines (ctrl+r to expand)
-
-● The configuration has YAML syntax errors. Let me fix the configuration file:
-
-● Bash(docker exec homeassistant sh -c 'cat > /config/configuration.yaml << EOF…)
- ⎿ (No content)
-
-● Bash(docker restart homeassistant)
- ⎿ homeassistant
-
-● Bash(sleep 20 && curl -I https://ha.will123song.xyz)
- ⎿ HTTP/2 400
- alt-svc: h3=":443"; ma=2592000
- content-type: text/plain; charset=utf-8
- … +9 lines (ctrl+r to expand)
-
-● Bash(docker logs homeassistant --tail 10)
- ⎿ 2025-07-14 10:26:33.576 ERROR (MainThread) [homeassistant.setup] Setup failed for 'my': Could not setup de
- pendencies: frontend
- 2025-07-14 10:26:33.580 ERROR (MainThread) [homeassistant.setup] Unable to set up dependencies of 'media_s
- … +21 lines (ctrl+r to expand)
-
-● The configuration still has issues. Let me restore the original and add just the HTTP configuration:
-
-● Bash(docker exec homeassistant cp /config/configuration.yaml.backup /config/configuration.yaml)
- ⎿ (No content)
-
-● Bash(docker exec homeassistant sh -c 'cat >> /config/configuration.yaml << EOF…)
- ⎿ (No content)
-
-● Bash(docker restart homeassistant)
- ⎿ homeassistant
-
-● Bash(sleep 20 && curl -L https://ha.will123song.xyz)
-  ⎿  Home Assistant
-
-> write a script to make a back_up of all my dockers (meaning everything under ~/docker) to ~/back_up
-
-● Update Todos
- ⎿ ☐ Create backup script for ~/docker directory
- ☐ Test the backup script
-
-● Write(../../backup_docker.sh)
- ⎿ Wrote 126 lines to ../../backup_docker.sh
- #!/bin/bash
- # Docker Backup Script
- # Backs up all docker containers and data to ~/back_up
- set -e # Exit on any error
- # Configuration
- SOURCE_DIR="$HOME/docker"
- BACKUP_BASE_DIR="$HOME/back_up"
- … +116 lines (ctrl+r to expand)
-
-● Bash(chmod +x /home/will/backup_docker.sh)
- ⎿ (No content)
-
-● Update Todos
- ⎿ ☒ Create backup script for ~/docker directory
- ☐ Test the backup script
-
-● Bash(mkdir -p /home/will/back_up)
- ⎿ (No content)
-
-● Bash(/home/will/backup_docker.sh)
- ⎿ Error: /home/will/backup_docker.sh: line 54: rsync: command not found
-
- [2025-07-13 21:29:57] Creating backup directory: /home/will/back_up/docker_backup_20250713_212957
- [2025-07-13 21:29:57] Stopping all running containers...
- 4434a8f4fd79
- 420e784c6783
- f319a26b957f
- [2025-07-13 21:30:02] Stopped containers: 4434a8f4fd79
- 420e784c6783
- f319a26b957f
- … +1 line (ctrl+r to see all)
-
-● Let me fix the script to use cp instead of rsync since it's not installed:
-
-● Update(../../backup_docker.sh)
- ⎿ Updated ../../backup_docker.sh with 1 addition and 1 removal
- 51
- 52 # Create backup
- 53 log "Starting backup of $SOURCE_DIR to $BACKUP_DIR"
- 54 - rsync -av --progress "$SOURCE_DIR/" "$BACKUP_DIR/docker/"
- 54 + cp -r "$SOURCE_DIR" "$BACKUP_DIR/"
- 55
- 56 # Export docker images
- 57 log "Exporting Docker images..."
-
-● Bash(docker start $(docker ps -aq))
- ⎿ 4434a8f4fd79
- 420e784c6783
- f319a26b957f
-
-● Bash(/home/will/backup_docker.sh)
- ⎿ Error: cp: cannot access '/home/will/docker/caddy/caddy_data/caddy': Permission denied
- cp: cannot access '/home/will/docker/caddy/caddy_config/caddy': Permission denied
- cp: cannot access '/home/will/docker/HA/db_data': Permission denied
- cp: cannot open '/home/will/docker/HA/config/.storage/auth_provider.homeassistant' for reading: Permission
- denied
- cp: cannot open '/home/will/docker/HA/config/.storage/core.uuid' for reading: Permission denied
- cp: cannot open '/home/will/docker/HA/config/.storage/core.config' for reading: Permission denied
- cp: cannot open '/home/will/docker/HA/config/.storage/onboarding' for reading: Permission denied
- cp: cannot open '/home/will/docker/HA/config/.storage/auth' for reading: Permission denied
-
- [2025-07-13 21:30:17] Creating backup directory: /home/will/back_up/docker_backup_20250713_213017
- … +8 lines (ctrl+r to see all)
-
-● The script needs to handle permission issues. Let me update it to use sudo for copying files owned by Docker:
-
-● Update(../../backup_docker.sh)
- ⎿ Updated ../../backup_docker.sh with 3 additions and 1 removal
- 51
- 52 # Create backup
- 53 log "Starting backup of $SOURCE_DIR to $BACKUP_DIR"
- 54 - cp -r "$SOURCE_DIR" "$BACKUP_DIR/"
- 54 + # Use sudo to handle Docker-owned files
- 55 + sudo cp -r "$SOURCE_DIR" "$BACKUP_DIR/"
- 56 + sudo chown -R $(whoami):$(whoami) "$BACKUP_DIR/docker"
- 57
- 58 # Export docker images
- 59 log "Exporting Docker images..."
-
-● Update Todos
- ⎿ ☒ Create backup script for ~/docker directory
- ☒ Test the backup script
-
-● Bash(docker start $(docker ps -aq))
- ⎿ 4434a8f4fd79
- 420e784c6783
- f319a26b957f
-
-● Created /home/will/backup_docker.sh - a comprehensive backup script that:
-
- Features:
- - Stops all containers before backup
- - Copies entire ~/docker directory to timestamped backup folder
- - Exports all Docker images as tar files
- - Saves container configurations and network definitions
- - Lists all volumes and networks
- - Restarts containers after backup
- - Creates a "latest" symlink for easy access
- - Handles Docker-owned files with sudo
-
- Usage:
- ./backup_docker.sh
-
- Output location: ~/back_up/docker_backup_YYYYMMDD_HHMMSS/
-
- The script requires sudo permissions to access Docker-owned files. Each backup includes restoration instructions
- in backup_info.txt.
-
-> This site can’t be reached
- The webpage at
- https://ha.will123song.xyz/?auth_callback=1&code=10dfbb0b48c14f17ba5baaf1844ac2b4&storeToken=true&state=eyJoYXNzV
- XJsIjoiaHR0cHM6Ly9oYS53aWxsMTIzc29uZy54eXoiLCJjbGllbnRJZCI6Imh0dHBzOi8vaGEud2lsbDEyM3NvbmcueHl6LyJ9 might be
- temporarily down or it may have moved permanently to a new web address.
-
-● The connection issue is likely because the containers were stopped during the backup script test. Let me check
- and restart the Home Assistant services:
-
-● Bash(docker ps -a)
- ⎿ CONTAINER ID IMAGE COMMAND CREATED STATUS
- PORTS
- NAMES
- … +9 lines (ctrl+r to expand)
-
-● Bash(docker compose up -d)
- ⎿ time="2025-07-13T21:32:25-05:00" level=warning msg="/home/will/docker/HA/docker-compose.yml: the attribute `ver
- sion` is obsolete, it will be ignored, please remove it to avoid potential confusion"
- Container homeassistant-db Running
- … +1 line (ctrl+r to expand)
-
-
-● All containers are running. Let me test the Home Assistant connection:
-
-● Bash(curl -I https://ha.will123song.xyz)
- ⎿ HTTP/2 405
- allow: GET
- alt-svc: h3=":443"; ma=2592000
- … +12 lines (ctrl+r to expand)
-
-● Bash(docker logs homeassistant --tail 5)
- ⎿ s6-rc: info: service legacy-cont-init: starting
- s6-rc: info: service legacy-cont-init successfully started
- s6-rc: info: service legacy-services: starting
- … +2 lines (ctrl+r to expand)
-
-● The services are running and responding correctly. The issue might be a temporary DNS or network problem. Let me
- check a few things:
-
-● Bash(nslookup ha.will123song.xyz)
- ⎿ Server: 100.100.100.100
- Address: 100.100.100.100#53
-
- … +3 lines (ctrl+r to expand)
-
-● Bash(curl -L https://ha.will123song.xyz 2>/dev/null | head -n 5)
- ⎿ Home Assistant /config
- ⎿ (no content)
-
diff --git a/router/HA/docker-compose.yml b/router/HA/docker-compose.yml
deleted file mode 100644
index 29b5daa..0000000
--- a/router/HA/docker-compose.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-version: '3.8'
-
-services:
- homeassistant:
- container_name: homeassistant
- image: ghcr.io/home-assistant/home-assistant:stable
- restart: unless-stopped
- privileged: true
- volumes:
- - ./config:/config
- - /etc/localtime:/etc/localtime:ro
- - /run/dbus:/run/dbus:ro
- expose:
- - "8123"
- environment:
- - TZ=Asia/Shanghai
- networks:
- - caddy-network
- depends_on:
- - homeassistant-db
-
- homeassistant-db:
- container_name: homeassistant-db
- image: postgres:15
- restart: unless-stopped
- environment:
- POSTGRES_DB: homeassistant
- POSTGRES_USER: homeassistant
- POSTGRES_PASSWORD: homeassistant_password
- volumes:
- - ./db_data:/var/lib/postgresql/data
- expose:
- - "5432"
- networks:
- - caddy-network
-
-volumes:
- db_data:
-
-networks:
- caddy-network:
- external: true
- name: caddy_caddy-network
\ No newline at end of file
diff --git a/router/README.md b/router/README.md
deleted file mode 100644
index 492fe7f..0000000
--- a/router/README.md
+++ /dev/null
@@ -1,314 +0,0 @@
-# Claude API Router
-
-An intelligent Claude API router with automatic failover and load balancing. When the primary API service hits rate limits or errors, it automatically switches to a backup provider.
-
-## 🚀 Features
-
-- **Automatic failover**: switches API providers automatically when rate limits or errors are detected
-- **Multi-provider support**: Claude Pro, DeepSeek, and Kimi
-- **Manual switching**: force a switch to a specific provider on demand
-- **Health checks**: automatically monitors and recovers the primary provider
-- **Fully compatible**: drop-in compatible with the Anthropic Claude API
-- **Streaming**: supports both streaming and non-streaming responses
-
-## 📋 Supported Providers
-
-In priority order:
-
-1. **Claude Pro** - highest priority, preferred provider
-2. **DeepSeek** - OpenAI-compatible fallback provider
-3. **Kimi** - OpenAI-compatible fallback provider
-
-## 🛠️ Installation and Configuration
-
-### 1. Requirements
-
-```bash
-cd router
-pip install -r requirements.txt
-```
-
-### 2. Configure API Keys
-
-Add the following tokens to `/home/will/docker/tokens.txt`:
-
-```txt
-claude_api_key=your_claude_api_key_here
-deepseek_api_key=your_deepseek_api_key_here
-kimi_api_key=your_kimi_api_key_here
-```
-
-Or set environment variables:
-
-```bash
-export CLAUDE_API_KEY="your_claude_api_key"
-export DEEPSEEK_API_KEY="your_deepseek_api_key"
-export KIMI_API_KEY="your_kimi_api_key"
-```
-
-### 3. Start the Service
-
-```bash
-# Development mode
-python app.py
-
-# Production mode (using Docker Compose)
-docker-compose up -d
-```
-
-The service starts at `http://localhost:8000`.
-
-## 🔧 Manually Switching Providers
-
-### API Endpoint
-
-```http
-POST /v1/switch-provider
-Content-Type: application/json
-
-{
- "provider": "provider_name"
-}
-```
-
-### Available Provider Names
-
-- `claude_pro` - Claude Pro provider (default)
-- `deepseek` - DeepSeek provider
-- `kimi` - Kimi provider
-
-### Usage Examples
-
-#### curl
-
-```bash
-# Switch to DeepSeek
-curl -X POST http://localhost:8000/v1/switch-provider \
- -H "Content-Type: application/json" \
- -d '{"provider": "deepseek"}'
-
-# Switch to Kimi
-curl -X POST http://localhost:8000/v1/switch-provider \
- -H "Content-Type: application/json" \
- -d '{"provider": "kimi"}'
-
-# Switch back to Claude Pro
-curl -X POST http://localhost:8000/v1/switch-provider \
- -H "Content-Type: application/json" \
- -d '{"provider": "claude_pro"}'
-```
-
-#### Python
-
-```python
-import requests
-
-def switch_provider(provider_name):
-    """Switch the API provider"""
-    url = "http://localhost:8000/v1/switch-provider"
-    data = {"provider": provider_name}
-
-    response = requests.post(url, json=data)
-    if response.status_code == 200:
-        result = response.json()
-        print(f"Switched to: {result['current_provider']}")
-        return True
-    else:
-        print(f"Switch failed: {response.text}")
-        return False
-
-# Usage
-switch_provider("deepseek")
-switch_provider("kimi")
-switch_provider("claude_pro")
-```
-
-### Checking Current Status
-
-```bash
-# Show the provider currently in use
-curl http://localhost:8000/v1/health
-
-# Example response
-{
- "status": "healthy",
- "current_provider": "claude_pro",
- "providers": {
- "claude_pro": "available",
- "deepseek": "unknown",
- "kimi": "unknown"
- }
-}
-```
-
-## 🔄 Automatic Failover
-
-The router detects the following errors and fails over automatically:
-
-- Rate limit exceeded
-- Usage limit exceeded
-- HTTP 429 errors
-- Daily/monthly limits reached
-- Network connection errors
-
-### Failover Flow
-
-1. A failover trigger condition is detected
-2. The router switches to the next provider by priority
-3. The request is retried
-4. The switch is logged
-5. Recovery to the primary provider is attempted periodically
-
-## 🩺 Health Checks and Automatic Recovery
-
-- **Check frequency**: the first 5 minutes of every hour
-- **Check method**: sends a test request to Claude Pro
-- **Automatic recovery**: switches back automatically once Claude Pro is available again
-
-## 🔌 API Compatibility
-
-The router is fully compatible with the Anthropic Claude API and supports:
-
-- All Claude models (claude-3-sonnet, claude-3-opus, etc.)
-- Streaming and non-streaming responses
-- System and user messages
-- All API parameters (max_tokens, temperature, etc.)
-
-### Usage Example
-
-```python
-import requests
-
-# Standard Claude API call
-url = "http://localhost:8000/v1/messages"
-headers = {
- "Content-Type": "application/json",
- "x-api-key": "your_claude_api_key"
-}
-data = {
- "model": "claude-3-sonnet-20240229",
- "max_tokens": 1000,
- "messages": [
- {"role": "user", "content": "Hello, Claude!"}
- ]
-}
-
-response = requests.post(url, headers=headers, json=data)
-print(response.json())
-```
-
-## 🐛 Troubleshooting
-
-### Common Issues
-
-#### 1. Automatic switching doesn't work
-
-**Problem**: the router does not switch automatically when limits are hit
-
-**Solution**:
-```bash
-# Manually switch to a fallback provider
-curl -X POST http://localhost:8000/v1/switch-provider \
- -H "Content-Type: application/json" \
- -d '{"provider": "deepseek"}'
-```
-
-#### 2. API key errors
-
-**Problem**: the API key is reported as invalid
-
-**Solution**:
-1. Check the key format in `tokens.txt`
-2. Make sure the key has no extra spaces or line breaks
-3. Verify that the key is valid
-
-#### 3. Service fails to start
-
-**Problem**: the router fails to start
-
-**Solution**:
-```bash
-# Check the logs
-docker-compose logs router
-
-# Start manually for debugging
-cd router
-python app.py
-```
-
-### Debug Mode
-
-Enable verbose logging:
-
-```python
-import logging
-logging.basicConfig(level=logging.DEBUG)
-```
-
-## 📊 Monitoring and Logging
-
-### Log Levels
-
-- `INFO`: normal operation logs
-- `WARNING`: provider switch warnings
-- `ERROR`: errors and failover events
-
-### Key Log Events
-
-- Provider switch events
-- API request failures
-- Health check results
-- Automatic recovery actions
-
-## ⚙️ Advanced Configuration
-
-### Custom Failover Conditions
-
-Edit the `should_failover` method in `app.py`:
-
-```python
-def should_failover(self, error_message: str) -> bool:
-    # Add custom error detection conditions
- custom_indicators = [
- "your_custom_error",
- "specific_provider_error"
- ]
- return any(indicator in error_message.lower()
- for indicator in custom_indicators)
-```
-
-### Adjusting the Health Check Frequency
-
-Modify the scheduled job in `app.py`:
-
-```python
-# Check every 30 minutes
-@scheduler.cron("*/30 * * * *")
-async def health_check_claude_pro():
-    # ... health check logic
-```
-
-## 🤝 SillyTavern Integration
-
-To use this router with SillyTavern:
-
-1. **API URL**: `http://localhost:8000/v1/messages`
-2. **API Key**: use your Claude API key
-3. **Model**: select any Claude model
-
-The router handles failover automatically and is fully transparent to SillyTavern.
-
-## 📄 License
-
-MIT License
-
-## 🆘 Getting Help
-
-If you run into problems:
-
-1. Check the log files
-2. Verify the API key configuration
-3. Try manually switching providers
-4. Check the health check status
-
-For further help, see the code comments or contact the developer.
\ No newline at end of file
diff --git a/router/app.py b/router/app.py
deleted file mode 100644
index e453bd9..0000000
--- a/router/app.py
+++ /dev/null
@@ -1,338 +0,0 @@
-import asyncio
-import json
-import logging
-from datetime import datetime
-from typing import Dict, Any, Optional
-from contextlib import asynccontextmanager
-
-import httpx
-from fastapi import FastAPI, Request, HTTPException
-from fastapi.responses import StreamingResponse, JSONResponse
-from anthropic import Anthropic
-from apscheduler.schedulers.asyncio import AsyncIOScheduler
-from apscheduler.triggers.cron import CronTrigger
-
-from config import config
-
-# Configure logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-class ClaudeRouter:
- def __init__(self):
- self.current_provider = "claude_pro"
- self.failover_count = 0
- self.last_failover = None
- self.last_health_check = None
- self.health_check_failures = 0
- self.scheduler = None
- self.providers = {
- "claude_pro": {
- "api_key": config.claude_pro_api_key,
- "base_url": config.claude_pro_base_url,
- "active": True
- },
- "claude_api": {
- "api_key": config.claude_api_key,
- "base_url": config.claude_api_base_url,
- "active": True
- }
- }
-
- async def get_anthropic_client(self, provider: str) -> Anthropic:
- """Get Anthropic client for the specified provider"""
- if provider not in self.providers:
- raise ValueError(f"Unknown provider: {provider}")
-
- provider_config = self.providers[provider]
- return Anthropic(
- api_key=provider_config["api_key"],
- base_url=provider_config["base_url"]
- )
-
- async def should_failover(self, error: Exception) -> bool:
- """Determine if we should failover based on the error"""
- error_str = str(error).lower()
-
- # Check for rate limiting or usage limit errors
- failover_indicators = [
- "rate_limit",
- "usage limit",
- "quota exceeded",
- "429",
- "too many requests",
- "limit reached"
- ]
-
- return any(indicator in error_str for indicator in failover_indicators)
-
- async def failover_to_next_provider(self):
- """Switch to the next available provider"""
- providers_list = list(self.providers.keys())
- current_index = providers_list.index(self.current_provider)
-
- # Try next provider
- for i in range(1, len(providers_list)):
- next_index = (current_index + i) % len(providers_list)
- next_provider = providers_list[next_index]
-
- if self.providers[next_provider]["active"]:
- logger.info(f"Failing over from {self.current_provider} to {next_provider}")
- self.current_provider = next_provider
- self.failover_count += 1
- self.last_failover = datetime.now()
- return True
-
- logger.error("No active providers available for failover")
- return False
-
- async def make_request(self, request_data: Dict[str, Any]) -> Dict[str, Any]:
- """Make request with automatic failover"""
- max_attempts = len(self.providers)
-
- for attempt in range(max_attempts):
- try:
- client = await self.get_anthropic_client(self.current_provider)
-
- # Extract parameters from request
- messages = request_data.get("messages", [])
- model = request_data.get("model", "claude-3-sonnet-20240229")
- max_tokens = request_data.get("max_tokens", 4096)
- stream = request_data.get("stream", False)
-
- logger.info(f"Making request with provider: {self.current_provider}")
-
- # Make the API call
- if hasattr(client, 'messages'):
- response = await asyncio.to_thread(
- client.messages.create,
- model=model,
- max_tokens=max_tokens,
- messages=messages,
- stream=stream
- )
- else:
- # For older anthropic versions
- response = await asyncio.to_thread(
- client.completions.create,
- model=model,
- max_tokens_to_sample=max_tokens,
- prompt=f"Human: {messages[0]['content']}\n\nAssistant:",
- stream=stream
- )
-
- return response
-
- except Exception as e:
- logger.error(f"Request failed with {self.current_provider}: {str(e)}")
-
- if await self.should_failover(e) and attempt < max_attempts - 1:
- if await self.failover_to_next_provider():
- continue
-
- # If this is the last attempt or failover failed, raise the error
- if attempt == max_attempts - 1:
- raise HTTPException(status_code=500, detail=f"All providers failed. Last error: {str(e)}")
-
- raise HTTPException(status_code=500, detail="No providers available")
-
- async def health_check_claude_pro(self):
- """Check if Claude Pro is available again"""
- # Only check if we're not currently using Claude Pro
- if self.current_provider == "claude_pro":
- logger.debug("Skipping health check - already using Claude Pro")
- return
-
- logger.info("Running Claude Pro health check...")
- self.last_health_check = datetime.now()
-
- try:
- client = Anthropic(
- api_key=config.claude_pro_api_key,
- base_url=config.claude_pro_base_url
- )
-
- # Send a minimal test message
- if hasattr(client, 'messages'):
- response = await asyncio.to_thread(
- client.messages.create,
- model=config.health_check_model,
- max_tokens=10,
- messages=[{"role": "user", "content": config.health_check_message}]
- )
- else:
- # For older anthropic versions
- response = await asyncio.to_thread(
- client.completions.create,
- model=config.health_check_model,
- max_tokens_to_sample=10,
- prompt=f"Human: {config.health_check_message}\n\nAssistant:"
- )
-
- # If successful, switch back to Claude Pro
- old_provider = self.current_provider
- self.current_provider = "claude_pro"
- self.health_check_failures = 0
-
- logger.info(f"Claude Pro health check successful! Switched from {old_provider} to claude_pro")
-
- except Exception as e:
- self.health_check_failures += 1
- error_str = str(e).lower()
-
- if any(indicator in error_str for indicator in ["rate_limit", "usage limit", "quota exceeded", "429", "too many requests", "limit reached"]):
- logger.info(f"Claude Pro still rate limited: {str(e)}")
- else:
- logger.warning(f"Claude Pro health check failed (attempt {self.health_check_failures}): {str(e)}")
-
- def start_scheduler(self):
- """Start the health check scheduler"""
- if not config.health_check_enabled:
- logger.info("Health check disabled in config")
- return
-
- self.scheduler = AsyncIOScheduler()
-
- # Schedule health check using cron expression
- self.scheduler.add_job(
- self.health_check_claude_pro,
- trigger=CronTrigger.from_crontab(config.health_check_cron),
- id="claude_pro_health_check",
- name="Claude Pro Health Check",
- misfire_grace_time=60
- )
-
- self.scheduler.start()
- logger.info(f"Health check scheduler started with cron: {config.health_check_cron}")
-
- def stop_scheduler(self):
- """Stop the health check scheduler"""
- if self.scheduler:
- self.scheduler.shutdown()
- logger.info("Health check scheduler stopped")
-
-# Initialize router
-router = ClaudeRouter()
-
-@asynccontextmanager
-async def lifespan(app: FastAPI):
- logger.info("Claude Router starting up...")
- logger.info(f"Current provider: {router.current_provider}")
-
- # Start health check scheduler
- router.start_scheduler()
-
- yield
-
- # Stop scheduler on shutdown
- router.stop_scheduler()
- logger.info("Claude Router shutting down...")
-
-app = FastAPI(
- title="Claude Router",
- description="Smart router for Claude API with automatic failover",
- version="1.0.0",
- lifespan=lifespan
-)
-
-@app.get("/health")
-async def health_check():
- """Health check endpoint"""
- return {
- "status": "healthy",
- "current_provider": router.current_provider,
- "failover_count": router.failover_count,
- "last_failover": router.last_failover.isoformat() if router.last_failover else None,
- "providers": {
- name: {"active": provider_config["active"]}
- for name, provider_config in router.providers.items()
- },
- "last_health_check": router.last_health_check.isoformat() if router.last_health_check else None,
- "health_check_failures": router.health_check_failures
- }
-
-@app.post("/v1/messages")
-async def create_message(request: Request):
- """Handle Claude API message creation with failover"""
- try:
- request_data = await request.json()
- stream = request_data.get("stream", False)
-
- if stream:
- # Handle streaming response
- async def generate_stream():
- try:
- response = await router.make_request(request_data)
- for chunk in response:
- yield f"data: {json.dumps(chunk.model_dump())}\n\n"
- yield "data: [DONE]\n\n"
- except Exception as e:
- error_data = {"error": str(e)}
- yield f"data: {json.dumps(error_data)}\n\n"
-
- return StreamingResponse(
- generate_stream(),
- media_type="text/event-stream",
- headers={
- "Cache-Control": "no-cache",
- "Connection": "keep-alive"
- }
- )
- else:
- # Handle non-streaming response
- response = await router.make_request(request_data)
- return response.model_dump()
-
- except Exception as e:
- logger.error(f"Request processing failed: {str(e)}")
- raise HTTPException(status_code=500, detail=str(e))
-
-@app.post("/v1/switch-provider")
-async def switch_provider(request: Request):
- """Manually switch to a specific provider"""
- provider = await request.json()
-
- if provider not in router.providers:
- raise HTTPException(status_code=400, detail=f"Unknown provider: {provider}")
-
- if not router.providers[provider]["active"]:
- raise HTTPException(status_code=400, detail=f"Provider {provider} is not active")
-
- old_provider = router.current_provider
- router.current_provider = provider
-
- logger.info(f"Manually switched from {old_provider} to {provider}")
-
- return {
- "message": f"Switched from {old_provider} to {provider}",
- "current_provider": router.current_provider
- }
-
-@app.get("/v1/status")
-async def get_status():
- """Get current router status"""
- return {
- "current_provider": router.current_provider,
- "failover_count": router.failover_count,
- "last_failover": router.last_failover.isoformat() if router.last_failover else None,
- "last_health_check": router.last_health_check.isoformat() if router.last_health_check else None,
- "health_check_failures": router.health_check_failures,
- "providers": router.providers
- }
-
-@app.post("/v1/health-check")
-async def manual_health_check():
- """Manually trigger Claude Pro health check"""
- try:
- await router.health_check_claude_pro()
- return {
- "message": "Health check completed",
- "current_provider": router.current_provider,
- "last_health_check": router.last_health_check.isoformat() if router.last_health_check else None
- }
- except Exception as e:
- raise HTTPException(status_code=500, detail=f"Health check failed: {str(e)}")
-
-if __name__ == "__main__":
- import uvicorn
- uvicorn.run(app, host=config.host, port=config.port)
\ No newline at end of file
diff --git a/router/backup_docker.sh b/router/backup_docker.sh
deleted file mode 100755
index c8a0973..0000000
--- a/router/backup_docker.sh
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/bin/bash
-
-# Docker configurations backup script with retention policy
-# Retains: 3 daily, 3 weekly, 3 monthly backups
-
-BACKUP_DIR="/home/will/docker_backups"
-SOURCE_DIR="/home/will/docker"
-REPO_URL="https://git.will123song.xyz/will/docker-configs.git"
-DATE=$(date +%Y%m%d_%H%M%S)
-DAY_OF_WEEK=$(date +%u) # 1-7 (Monday is 1)
-DAY_OF_MONTH=$(date +%d)
-
-# Create backup directory if it doesn't exist
-mkdir -p "$BACKUP_DIR"
-
-# Function to create git backup
-create_git_backup() {
- echo "Creating git backup for $DATE"
- cd "$SOURCE_DIR"
-
- # Add any new files and commit changes
- git add .
- if git diff --cached --quiet; then
- echo "No changes to backup"
- else
- git commit -m "Automated backup - $DATE"
- git push origin master 2>/dev/null || echo "Failed to push to remote (may need manual setup)"
- fi
-}
-
-# Function to clean old backups
-cleanup_backups() {
- echo "Cleaning up old backups..."
- cd "$BACKUP_DIR"
-
- # Keep last 3 daily backups (delete older daily backups)
- ls -t daily_*.tar.gz 2>/dev/null | tail -n +4 | xargs rm -f 2>/dev/null
-
- # Keep last 3 weekly backups (delete older weekly backups)
- ls -t weekly_*.tar.gz 2>/dev/null | tail -n +4 | xargs rm -f 2>/dev/null
-
- # Keep last 3 monthly backups (delete older monthly backups)
- ls -t monthly_*.tar.gz 2>/dev/null | tail -n +4 | xargs rm -f 2>/dev/null
-
- echo "Cleanup completed"
-}
-
-# Create tar backup based on schedule
-create_tar_backup() {
- cd "$SOURCE_DIR"
-
- # Determine backup type
- if [ "$DAY_OF_MONTH" = "01" ]; then
- # Monthly backup on 1st of month
- BACKUP_TYPE="monthly"
- BACKUP_FILE="$BACKUP_DIR/monthly_$DATE.tar.gz"
- elif [ "$DAY_OF_WEEK" = "1" ]; then
- # Weekly backup on Monday
- BACKUP_TYPE="weekly"
- BACKUP_FILE="$BACKUP_DIR/weekly_$DATE.tar.gz"
- else
- # Daily backup
- BACKUP_TYPE="daily"
- BACKUP_FILE="$BACKUP_DIR/daily_$DATE.tar.gz"
- fi
-
- echo "Creating $BACKUP_TYPE backup: $BACKUP_FILE"
-
- # Create tar backup excluding data directories
- tar -czf "$BACKUP_FILE" \
- --exclude='*/data/*' \
- --exclude='*/postgres/*' \
- --exclude='*/vw-data/*' \
- --exclude='*/db_data/*' \
- --exclude='*/caddy_data/*' \
- --exclude='*/caddy_config/*' \
- --exclude='*/config/*' \
- --exclude='HA/config/*' \
- --exclude='HA/db_data/*' \
- --exclude='.git' \
- --exclude='gitea/postgres' \
- --exclude='HA/db_data' \
- --warning=no-file-changed \
- --warning=no-file-removed \
- . 2>/dev/null || true
-
- if [ -f "$BACKUP_FILE" ]; then
- echo "$BACKUP_TYPE backup created successfully: $BACKUP_FILE"
- ls -lh "$BACKUP_FILE"
- else
- echo "Error creating $BACKUP_TYPE backup"
- exit 1
- fi
-}
-
-# Main execution
-echo "Starting backup process at $(date)"
-
-# Create git backup
-create_git_backup
-
-# Create tar backup
-create_tar_backup
-
-# Clean up old backups
-cleanup_backups
-
-echo "Backup process completed at $(date)"
\ No newline at end of file
diff --git a/router/caddy/Caddyfile b/router/caddy/Caddyfile
deleted file mode 100644
index ff239e9..0000000
--- a/router/caddy/Caddyfile
+++ /dev/null
@@ -1,42 +0,0 @@
-# Caddy config using container names
-
-jellyfin.will123song.xyz {
- reverse_proxy jellyfin:8096
-}
-
-portainer.will123song.xyz {
- reverse_proxy portainer:9000
-}
-
-git.will123song.xyz {
- reverse_proxy gitea:3000
-}
-
-adguard.will123song.xyz {
- reverse_proxy adguardhome:3000
-}
-bt.will123song.xyz {
- reverse_proxy qbittorrent:8080
-}
-
-vault.will123song.xyz {
- reverse_proxy vaultwarden:80
-}
-
-silly.will123song.xyz {
- reverse_proxy sillytavern:8000
-}
-
-ha.will123song.xyz, homeassistant.fossa-dinosaur.ts.net {
- reverse_proxy homeassistant:8123
-}
-
-# Local test port
-:8090 {
- reverse_proxy jellyfin:8096
-}
-
-# Default site
-will123song.xyz, www.will123song.xyz {
- respond "Welcome to Will's Server! 🚀\n\nServices Available:\n- Jellyfin: http://localhost:8096\n- Portainer: http://localhost:9000\n- qBittorrent: http://localhost:18080\n- Vaultwarden: http://localhost:8081\n- AdGuard: http://localhost:3000\n- Gitea: http://localhost:13000\n- SillyTavern: http://localhost:8000\n- Home Assistant: http://localhost:8123"
-}
\ No newline at end of file
diff --git a/router/caddy/docker-compose.yml b/router/caddy/docker-compose.yml
deleted file mode 100644
index 34a3127..0000000
--- a/router/caddy/docker-compose.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-version: '3.8'
-
-services:
- caddy:
- image: caddy:latest
- container_name: caddy
- restart: unless-stopped
- ports:
- - "80:80"
- - "443:443"
- volumes:
- - ./Caddyfile:/etc/caddy/Caddyfile:ro
- - ./caddy_data:/data
- - ./caddy_config:/config
- networks:
- - caddy-network
-
-networks:
- caddy-network:
- driver: bridge
\ No newline at end of file
diff --git a/router/config.py b/router/config.py
deleted file mode 100644
index 3d2797c..0000000
--- a/router/config.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import os
-from typing import Optional
-from pydantic import BaseModel
-
-class Config(BaseModel):
- # Claude API configurations
- claude_pro_api_key: str = ""
- claude_api_key: str = ""
-
- # Router settings
- port: int = 8000
- host: str = "0.0.0.0"
-
- # Retry settings
- max_retries: int = 3
- retry_delay: float = 1.0
-
- # API endpoints
- claude_pro_base_url: str = "https://api.anthropic.com"
- claude_api_base_url: str = "https://api.anthropic.com"
-
- # Health check settings
- health_check_enabled: bool = True
- health_check_cron: str = "0-4 * * * *" # Every hour, first 5 minutes
- health_check_message: str = "ping"
- health_check_model: str = "claude-3-haiku-20240307" # Use cheapest model for checks
-
- def __init__(self, **kwargs):
- super().__init__(**kwargs)
- # Load from environment or token file
- self.load_from_env()
-
- def load_from_env(self):
- """Load configuration from environment variables or token file"""
- # Try environment variables first
- self.claude_api_key = os.getenv("CLAUDE_API_KEY", "")
-
- # Load from tokens.txt if not found in env
- if not self.claude_api_key:
- try:
- with open("/home/will/docker/tokens.txt", "r") as f:
- for line in f:
- if line.startswith("claude_API="):
- self.claude_api_key = line.split("=", 1)[1].strip()
- break
- except FileNotFoundError:
- pass
-
- # For MVP, we'll use the same API key for both pro and regular
- # In practice, Claude Pro might use a different endpoint or key
- self.claude_pro_api_key = self.claude_api_key
-
-# Global config instance
-config = Config()
\ No newline at end of file
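Since `load_from_env` consults the `CLAUDE_API_KEY` environment variable before falling back to the token file, the key can be overridden per session without touching `tokens.txt`. A minimal sketch with a placeholder key:

```bash
# Environment variable takes precedence over /home/will/docker/tokens.txt
export CLAUDE_API_KEY="sk-ant-placeholder"

# Run the router with the overridden key (assumes app.py sits next to config.py)
python app.py
```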
diff --git a/router/discord_tokens.txt b/router/discord_tokens.txt
deleted file mode 100644
index 4be5373..0000000
--- a/router/discord_tokens.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-tu_discord_token=MTM2NTA1NzE0MjgyMTc0ODgyNw.G7_tcK.PAh7k9D7LiBhxYCZabjjzla9LmAyTxBoK-GObo
-chatgpt_discord_token=MTM5MDg5MTA3NjE1MTE0ODU0NA.GKZTKW.42yshmqD9KTdsAB2uRuymNdux-7b0udi-OdirI
-deepseek_discord_token=MTM4Njg4NDI3NzcwNjE2NjQ2Mw.GCPpjU.hKr60Pbs4B7kLR0sfTzMC7DwBbUDaxWYBqSXEY
-claude_discord_token=MTM5MDg4NzEyNTU2MTM4MDk0Ng.GXibjV.3TAc3PU-m_6F7dg5QNddYfMQxrlPIsNMH24nTY
-deepseek_API=sk-43e9e6afcef34472ade8e1db9c239c11
-chatgpt_API=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
-gitea_address=https://git.will123song.xyz/will/docker-configs.git
-gitea_token=92cf76d1bd8bf9303aff3bb9b475b73a0fe6bfd7
-deepseek_test_api=sk-30c54064e5154fbcaaa6e4675bae2995
-deepseek_model=deepseek-chat
-chatgpt_model=gpt-4o-mini-2024-07-18
-claude_API=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
-alphavantage_API=H1TNEAN9JONTFCY6
-VaultWarden_ADMIN_TOKEN=U2WwYJYRprFMpqxZdTpj6afU8VfBoGU0JSLvHE30WkbNMpAijHccDU1GPEI0/Bff
-
-# Discord bot invite links (for adding the bots to other servers)
-# Usage: open a link in a browser, pick the target server, and authorize
-
-# Tu Bot - the bot that handles all slash commands
-tu_client_id=1365057142821748827
-tu_invite_link=https://discord.com/api/oauth2/authorize?client_id=1365057142821748827&permissions=277025770544&scope=bot%20applications.commands
-
-# ChatGPT Bot - ChatGPT AI chat bot
-chatgpt_client_id=1390891076151148544
-chatgpt_invite_link=https://discord.com/api/oauth2/authorize?client_id=1390891076151148544&permissions=274877908992&scope=bot%20applications.commands
-
-# DeepSeek Bot - DeepSeek AI chat bot
-deepseek_client_id=1386884277706166463
-deepseek_invite_link=https://discord.com/api/oauth2/authorize?client_id=1386884277706166463&permissions=274877908992&scope=bot%20applications.commands
-
-# Claude Bot - Claude AI chat bot
-claude_client_id=1390887125561380946
-claude_invite_link=https://discord.com/api/oauth2/authorize?client_id=1390887125561380946&permissions=274877908992&scope=bot%20applications.commands
-
diff --git a/router/docker-compose.yml b/router/docker-compose.yml
deleted file mode 100644
index cd212b3..0000000
--- a/router/docker-compose.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-version: '3.8'
-
-services:
- claude-router:
- build: .
- container_name: claude-router
- ports:
- - "8000:8000"
- environment:
- - CLAUDE_API_KEY=${CLAUDE_API_KEY}
- volumes:
- - /home/will/docker/tokens.txt:/home/will/docker/tokens.txt:ro
- restart: unless-stopped
- networks:
- - router-network
- healthcheck:
- test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
- interval: 30s
- timeout: 10s
- retries: 3
- start_period: 30s
-
-networks:
- router-network:
- driver: bridge
\ No newline at end of file
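The compose healthcheck above probes `/health` with curl every 30 seconds; Docker records the results, which can be read back without calling the API directly. A sketch:

```bash
# Current health state as tracked by the compose healthcheck
docker inspect --format '{{.State.Health.Status}}' claude-router

# Recent probe log entries, including the curl output
docker inspect --format '{{json .State.Health.Log}}' claude-router
```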
diff --git a/router/gitea/docker-compose.yml b/router/gitea/docker-compose.yml
deleted file mode 100644
index fea260b..0000000
--- a/router/gitea/docker-compose.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-services:
- gitea:
- image: gitea/gitea:latest
- container_name: gitea
- restart: unless-stopped
- environment:
- - USER_UID=1000
- - USER_GID=1000
- - GITEA__database__DB_TYPE=postgres
- - GITEA__database__HOST=db:5432
- - GITEA__database__NAME=gitea
- - GITEA__database__USER=gitea
- - GITEA__database__PASSWD=gitea
- volumes:
- - ./data:/data
- - /etc/timezone:/etc/timezone:ro
- - /etc/localtime:/etc/localtime:ro
- depends_on:
- - db
- networks:
- - caddy_caddy-network
-
- db:
- image: postgres:14
- container_name: gitea-db
- restart: unless-stopped
- environment:
- - POSTGRES_USER=gitea
- - POSTGRES_PASSWORD=gitea
- - POSTGRES_DB=gitea
- volumes:
- - ./postgres:/var/lib/postgresql/data
- networks:
- - caddy_caddy-network
-
-networks:
- caddy_caddy-network:
- external: true
\ No newline at end of file
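Both containers join `caddy_caddy-network`, which is declared `external` and therefore must already exist, normally because the caddy compose project has been brought up first. If gitea is started on a fresh host before caddy, the network can be created by hand; a sketch:

```bash
# Check whether the caddy project network exists
docker network ls --filter name=caddy_caddy-network

# Create it manually if the caddy stack has not run yet
docker network create caddy_caddy-network
```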
diff --git a/router/recover_tokens.sh b/router/recover_tokens.sh
deleted file mode 100755
index d010199..0000000
--- a/router/recover_tokens.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/bash
-
-echo "=== Discord Tokens 恢复脚本 ==="
-
-# 检查数据分区
-echo "1. 检查数据分区状态..."
-if [ -d "/mnt/data" ]; then
- echo "✅ /mnt/data 目录存在"
-
- # 尝试访问备份文件
- if [ -f "/mnt/data/docker_backup_20250713_165104/containers/discordbot-v1.0.4_export.tar" ]; then
- echo "✅ 找到Discord备份文件"
-
- # 提取tokens.txt
- echo "2. 提取tokens.txt..."
- tar -xf "/mnt/data/docker_backup_20250713_165104/containers/discordbot-v1.0.4_export.tar" -C /tmp/ app/tokens.txt
-
- if [ -f "/tmp/app/tokens.txt" ]; then
- echo "✅ 成功提取tokens.txt"
-
- # 备份当前文件
- cp /home/will/docker/discord_tokens.txt /home/will/docker/discord_tokens.txt.backup
-
- # 恢复原始tokens
- cp /tmp/app/tokens.txt /home/will/docker/discord_tokens.txt
-
- echo "✅ tokens.txt已恢复!"
- echo "原始文件大小: $(stat -c%s /home/will/docker/discord_tokens.txt) 字节"
- echo "备份文件: /home/will/docker/discord_tokens.txt.backup"
-
- # 重启Discord bot
- echo "3. 重启Discord Bot..."
- docker compose restart discordbot
-
- else
- echo "❌ 无法提取tokens.txt"
- fi
- else
- echo "❌ 未找到备份文件"
- echo "请手动编辑 /home/will/docker/discord_tokens.txt 添加你的Discord bot tokens"
- fi
-else
- echo "❌ /mnt/data 目录不存在,数据分区可能未挂载"
- echo "请手动挂载数据分区或编辑tokens文件"
-fi
-
-echo ""
-echo "=== 手动恢复方法 ==="
-echo "如果自动恢复失败,请:"
-echo "1. 编辑文件: nano /home/will/docker/discord_tokens.txt"
-echo "2. 添加你的Discord bot tokens (每行一个)"
-echo "3. 重启Discord Bot: docker compose restart discordbot"
\ No newline at end of file
diff --git a/router/requirements.txt b/router/requirements.txt
deleted file mode 100644
index 26138b8..0000000
--- a/router/requirements.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-fastapi==0.104.1
-uvicorn==0.24.0
-httpx==0.25.2
-pydantic==2.5.0
-anthropic==0.7.8
-python-dotenv==1.0.0
-apscheduler==3.10.4
\ No newline at end of file
diff --git a/router/router/Dockerfile b/router/router/Dockerfile
deleted file mode 100644
index a679451..0000000
--- a/router/router/Dockerfile
+++ /dev/null
@@ -1,32 +0,0 @@
-FROM python:3.11-slim
-
-# Set working directory
-WORKDIR /app
-
-# Install system dependencies
-RUN apt-get update && apt-get install -y \
- curl \
- && rm -rf /var/lib/apt/lists/*
-
-# Copy requirements first for better caching
-COPY requirements.txt .
-
-# Install Python dependencies
-RUN pip install --no-cache-dir -r requirements.txt
-
-# Copy application code
-COPY . .
-
-# Create non-root user
-RUN useradd -m -u 1000 router && chown -R router:router /app
-USER router
-
-# Expose port
-EXPOSE 8000
-
-# Health check
-HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \
- CMD curl -f http://localhost:8000/health || exit 1
-
-# Run the application
-CMD ["python", "app.py"]
\ No newline at end of file
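Copying `requirements.txt` before the application code means the pip layer is cached across rebuilds; only the final `COPY . .` layer is invalidated by code changes. A quick build-and-run sketch (the image tag and key value are placeholders):

```bash
# Rebuild: an unchanged requirements.txt reuses the cached pip install layer
docker build -t claude-router .

# Run standalone; compose normally handles this
docker run --rm -p 8000:8000 -e CLAUDE_API_KEY="sk-ant-placeholder" claude-router
```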
diff --git a/router/router/README.md b/router/router/README.md
deleted file mode 100644
index 096384f..0000000
--- a/router/router/README.md
+++ /dev/null
@@ -1,238 +0,0 @@
-# Claude Router
-
-A smart router for the Claude API with automatic failover between Claude Pro and the Claude API. When Claude Pro hits its usage limit, the router switches to the Claude API automatically, keeping the service available.
-
-## Features
-
-- **Automatic failover**: switches provider when a rate limit or usage limit is detected
-- **Scheduled health checks**: probes for Claude Pro quota recovery during the first 5 minutes of every hour
-- **Smart recovery**: switches back to Claude Pro automatically, preferring the premium tier
-- **Manual switching**: supports switching to a specific provider on demand
-- **Claude Code CLI compatible**: fully compatible with the Anthropic API format
-- **Dockerized deployment**: one command to deploy, works out of the box
-
-## Quick Start
-
-### 1. Deploy with Docker Compose
-
-```bash
-# Clone or enter the project directory
-cd /home/will/docker/router
-
-# Build and start the service
-docker-compose up -d
-
-# Check service status
-docker-compose ps
-```
-
-### 2. Verify the Service Is Running
-
-```bash
-# Health check
-curl http://localhost:8000/health
-
-# Check current status
-curl http://localhost:8000/v1/status
-```
-
-### 3. Configure Claude Code CLI
-
-Point the Claude Code CLI at the router by setting an environment variable:
-
-```bash
-# Set the API endpoint to the router address
-export ANTHROPIC_API_URL="http://localhost:8000"
-
-# Add to bashrc to make it permanent
-echo 'export ANTHROPIC_API_URL="http://localhost:8000"' >> ~/.bashrc
-
-# Test the configuration
-echo "Hello Claude Router" | claude --print
-```
-
-**Note**: there is no need to change ANTHROPIC_API_KEY; the router handles API keys automatically.
-
-## API Endpoints
-
-### Main Endpoints
-
-- `POST /v1/messages` - Claude API message creation (Anthropic API compatible)
-- `GET /health` - health check
-- `GET /v1/status` - get router status
-- `POST /v1/switch-provider` - manually switch provider
-- `POST /v1/health-check` - manually trigger a Claude Pro health check
-
-### Example Health Check Response
-
-```json
-{
- "status": "healthy",
- "current_provider": "claude_pro",
- "failover_count": 0,
- "last_failover": null,
- "last_health_check": "2025-07-14T19:00:00.000Z",
- "health_check_failures": 0,
- "providers": {
- "claude_pro": {"active": true},
- "claude_api": {"active": true}
- }
-}
-```
-
-## Configuration
-
-### Environment Variables
-
-- `CLAUDE_API_KEY`: Claude API key
-- `ROUTER_HOST`: listen address (default: 0.0.0.0)
-- `ROUTER_PORT`: listen port (default: 8000)
-- `MAX_RETRIES`: maximum number of retries (default: 3)
-- `RETRY_DELAY`: retry delay (default: 1.0 seconds)
-
-### Health Check Settings
-
-- `health_check_enabled`: enable the scheduled health check (default: true)
-- `health_check_cron`: cron expression for the check (default: "0-4 * * * *" - first 5 minutes of every hour)
-- `health_check_message`: content of the test message (default: "ping")
-- `health_check_model`: model to use (default: claude-3-haiku-20240307)
-
-### Token File
-
-The router reads API keys from `/home/will/docker/tokens.txt` automatically, so no environment variables need to be configured by hand.
-
-## Failover Mechanism
-
-The router automatically switches to the next available provider when it detects any of the following errors:
-
-- 429 (Too Many Requests)
-- rate limit errors
-- usage limit reached
-- errors mentioning "usage limit reached"
-
-**Priority order**: Claude Pro → Claude API
-
-## Usage Examples
-
-### Basic API Call
-
-```bash
-curl -X POST http://localhost:8000/v1/messages \
- -H "Content-Type: application/json" \
- -H "Authorization: Bearer your_api_key" \
- -d '{
- "model": "claude-3-sonnet-20240229",
- "max_tokens": 1024,
- "messages": [
- {"role": "user", "content": "Hello, Claude!"}
- ]
- }'
-```
-
-### Manually Switch Provider
-
-```bash
-curl -X POST http://localhost:8000/v1/switch-provider \
- -H "Content-Type: application/json" \
- -d '"claude_api"'
-```
-
-### Manual Health Check
-
-```bash
-# Immediately check whether Claude Pro is available
-curl -X POST http://localhost:8000/v1/health-check
-
-# View detailed status
-curl http://localhost:8000/v1/status
-```
-
-## Development and Debugging
-
-### Local Development
-
-```bash
-# Create a virtual environment
-python3 -m venv venv
-source venv/bin/activate
-
-# Install dependencies
-pip install -r requirements.txt
-
-# Run the application
-python app.py
-```
-
-### Viewing Logs
-
-```bash
-# Docker container logs
-docker-compose logs -f claude-router
-
-# Live logs
-docker logs -f claude-router
-```
-
-## Troubleshooting
-
-### Common Issues
-
-1. **Service fails to start**
-   - Check that tokens.txt exists and is correctly formatted
-   - Make sure port 8000 is not already in use
-
-2. **API calls fail**
-   - Verify that the API key is valid
-   - Check network connectivity to api.anthropic.com
-
-3. **Automatic failover does not trigger**
-   - Check the logs to confirm the error-detection logic
-   - Confirm the backup provider is configured correctly
-
-### Monitoring
-
-- Health check: `http://localhost:8000/health`
-- Status monitoring: `http://localhost:8000/v1/status`
-- Docker health check: `docker inspect claude-router`
-
-## Architecture
-
-- **Framework**: FastAPI + Uvicorn
-- **HTTP client**: httpx
-- **AI library**: anthropic
-- **Containerization**: Docker + Docker Compose
-- **Configuration**: pydantic + python-dotenv
-
-## Version Info
-
-- Version: 1.0.0 (MVP)
-- Python: 3.11+
-- Supports: Claude 3 model family
-
-## Changelog
-
-### v1.1.0 (2025-07-14)
-- ✅ Added scheduled health checks
-- ✅ Automatic detection of Claude Pro quota recovery during the first 5 minutes of every hour
-- ✅ Smart automatic switch back to Claude Pro
-- ✅ New manual health check API
-- ✅ Improved logging and status monitoring
-
-### v1.0.0 (2025-07-14)
-- ✅ Basic router functionality
-- ✅ Automatic failover from Claude Pro to the Claude API
-- ✅ Dockerized deployment
-- ✅ Claude Code CLI compatibility
-
-## Roadmap
-
-- [ ] Add DeepSeek API support
-- [ ] Add ChatGPT API support
-- [ ] Request statistics and a monitoring dashboard
-- [ ] Request caching
-- [ ] Load balancing
-- [ ] Kimi v2 API integration
-
-## License
-
-MIT License
\ No newline at end of file
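To watch the failover mechanism described above in action, `/v1/status` can be polled while requests are flowing; the `current_provider` field flips when a rate limit triggers a switch. A minimal sketch:

```bash
# Poll the router once per second and print the active provider
while true; do
  curl -s http://localhost:8000/v1/status | grep -o '"current_provider":"[^"]*"'
  sleep 1
done
```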
diff --git a/router/router/app.py b/router/router/app.py
deleted file mode 100644
index 9f0373e..0000000
--- a/router/router/app.py
+++ /dev/null
@@ -1,432 +0,0 @@
-import asyncio
-import json
-import logging
-from datetime import datetime
-from typing import Dict, Any, Optional
-from contextlib import asynccontextmanager
-
-import httpx
-from fastapi import FastAPI, Request, HTTPException
-from fastapi.responses import StreamingResponse, JSONResponse
-from anthropic import Anthropic
-from apscheduler.schedulers.asyncio import AsyncIOScheduler
-from apscheduler.triggers.cron import CronTrigger
-
-from config import config
-
-# Configure logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-class ClaudeRouter:
- def __init__(self):
- self.current_provider = "claude_pro"
- self.failover_count = 0
- self.last_failover = None
- self.last_health_check = None
- self.health_check_failures = 0
- self.scheduler = None
- # Ordered by priority: Claude Pro > DeepSeek > Kimi (claude_api removed)
- from collections import OrderedDict
- self.providers = OrderedDict([
- ("claude_pro", {
- "api_key": config.claude_api_key, # Use claude_api_key for claude_pro
- "base_url": config.claude_pro_base_url,
- "type": "anthropic",
- "active": True
- }),
- ("deepseek", {
- "api_key": config.deepseek_api_key,
- "base_url": config.deepseek_base_url,
- "type": "openai",
- "active": True
- }),
- ("kimi", {
- "api_key": config.kimi_api_key,
- "base_url": config.kimi_base_url,
- "type": "openai",
- "active": True
- })
- ])
-
- async def get_anthropic_client(self, provider: str) -> Anthropic:
- """Get Anthropic client for the specified provider"""
- if provider not in self.providers:
- raise ValueError(f"Unknown provider: {provider}")
-
- provider_config = self.providers[provider]
- if provider_config["type"] != "anthropic":
- raise ValueError(f"Provider {provider} is not an Anthropic provider")
-
- return Anthropic(
- api_key=provider_config["api_key"],
- base_url=provider_config["base_url"]
- )
-
- async def make_openai_request(self, provider: str, request_data: Dict[str, Any]) -> Dict[str, Any]:
- """Make request to OpenAI-compatible API (like DeepSeek)"""
- if provider not in self.providers:
- raise ValueError(f"Unknown provider: {provider}")
-
- provider_config = self.providers[provider]
- if provider_config["type"] != "openai":
- raise ValueError(f"Provider {provider} is not an OpenAI-compatible provider")
-
- headers = {
- "Authorization": f"Bearer {provider_config['api_key']}",
- "Content-Type": "application/json"
- }
-
- # Convert Anthropic-style messages to OpenAI format
- messages = request_data.get("messages", [])
- openai_messages = []
- for msg in messages:
- openai_messages.append({
- "role": msg["role"],
- "content": msg["content"]
- })
-
- # Pick an appropriate default model per provider
- if provider == "deepseek":
- default_model = "deepseek-reasoner"
- elif provider == "kimi":
- default_model = "kimi-k2"
- else:
- default_model = "gpt-3.5-turbo" # 通用OpenAI默认模型
-
- payload = {
- "model": request_data.get("model", default_model),
- "messages": openai_messages,
- "max_tokens": request_data.get("max_tokens", 4096),
- "stream": request_data.get("stream", False)
- }
-
- async with httpx.AsyncClient() as client:
- response = await client.post(
- f"{provider_config['base_url']}/v1/chat/completions",
- headers=headers,
- json=payload,
- timeout=60.0
- )
- response.raise_for_status()
- return response.json()
-
- async def should_failover(self, error: Exception) -> bool:
- """Determine if we should failover based on the error"""
- error_str = str(error).lower()
-
- # Check for rate limiting or usage limit errors
- failover_indicators = [
- "rate_limit",
- "usage limit",
- "quota exceeded",
- "429",
- "too many requests",
- "limit reached",
- "rate limit exceeded",
- "usage limit reached",
- "monthly limit exceeded",
- "daily limit exceeded"
- ]
-
- return any(indicator in error_str for indicator in failover_indicators)
-
- async def failover_to_next_provider(self):
- """Switch to the next available provider"""
- providers_list = list(self.providers.keys())
- current_index = providers_list.index(self.current_provider)
-
- # Try next provider
- for i in range(1, len(providers_list)):
- next_index = (current_index + i) % len(providers_list)
- next_provider = providers_list[next_index]
-
- if self.providers[next_provider]["active"]:
- logger.info(f"Failing over from {self.current_provider} to {next_provider}")
- self.current_provider = next_provider
- self.failover_count += 1
- self.last_failover = datetime.now()
- return True
-
- logger.error("No active providers available for failover")
- return False
-
- async def make_request(self, request_data: Dict[str, Any]) -> Dict[str, Any]:
- """Make request with automatic failover"""
- max_attempts = len(self.providers)
-
- for attempt in range(max_attempts):
- try:
- provider_config = self.providers[self.current_provider]
- logger.info(f"Making request with provider: {self.current_provider} (type: {provider_config['type']})")
-
- # Route to appropriate client based on provider type
- if provider_config["type"] == "anthropic":
- response = await self._make_anthropic_request(request_data)
- elif provider_config["type"] == "openai":
- response = await self.make_openai_request(self.current_provider, request_data)
- else:
- raise ValueError(f"Unknown provider type: {provider_config['type']}")
-
- return response
-
- except Exception as e:
- logger.error(f"Request failed with {self.current_provider}: {str(e)}")
-
- if await self.should_failover(e) and attempt < max_attempts - 1:
- if await self.failover_to_next_provider():
- continue
-
- # If this is the last attempt or failover failed, raise the error
- if attempt == max_attempts - 1:
- raise HTTPException(status_code=500, detail=f"All providers failed. Last error: {str(e)}")
-
- raise HTTPException(status_code=500, detail="No providers available")
-
- async def _make_anthropic_request(self, request_data: Dict[str, Any]) -> Dict[str, Any]:
- """Make request to Anthropic API"""
- client = await self.get_anthropic_client(self.current_provider)
-
- # Extract parameters from request
- messages = request_data.get("messages", [])
- model = request_data.get("model", "claude-3-sonnet-20240229")
- max_tokens = request_data.get("max_tokens", 4096)
- stream = request_data.get("stream", False)
-
- # Make the API call
- if hasattr(client, 'messages'):
- response = await asyncio.to_thread(
- client.messages.create,
- model=model,
- max_tokens=max_tokens,
- messages=messages,
- stream=stream
- )
- else:
- # For older anthropic versions
- response = await asyncio.to_thread(
- client.completions.create,
- model=model,
- max_tokens_to_sample=max_tokens,
- prompt=f"Human: {messages[0]['content']}\n\nAssistant:",
- stream=stream
- )
-
- return response
-
- async def health_check_primary_provider(self):
- """Check if primary provider (claude_pro) is available again"""
- primary_provider = "claude_pro"
-
- # Only check if we're not currently using the primary provider
- if self.current_provider == primary_provider:
- logger.debug(f"Skipping health check - already using {primary_provider}")
- return
-
- logger.info(f"Running {primary_provider} health check...")
- self.last_health_check = datetime.now()
-
- try:
- client = Anthropic(
- api_key=config.claude_api_key, # Use claude_api_key for claude_pro
- base_url=config.claude_pro_base_url
- )
-
- # Send a minimal test message
- if hasattr(client, 'messages'):
- response = await asyncio.to_thread(
- client.messages.create,
- model=config.health_check_model,
- max_tokens=10,
- messages=[{"role": "user", "content": config.health_check_message}]
- )
- else:
- # For older anthropic versions
- response = await asyncio.to_thread(
- client.completions.create,
- model=config.health_check_model,
- max_tokens_to_sample=10,
- prompt=f"Human: {config.health_check_message}\n\nAssistant:"
- )
-
- # If successful, switch back to primary provider
- old_provider = self.current_provider
- self.current_provider = primary_provider
- self.health_check_failures = 0
-
- logger.info(f"{primary_provider} health check successful! Switched from {old_provider} to {primary_provider}")
-
- except Exception as e:
- self.health_check_failures += 1
- error_str = str(e).lower()
-
- if any(indicator in error_str for indicator in ["rate_limit", "usage limit", "quota exceeded", "429", "too many requests", "limit reached"]):
- logger.info(f"{primary_provider} still rate limited: {str(e)}")
- else:
- logger.warning(f"{primary_provider} health check failed (attempt {self.health_check_failures}): {str(e)}")
-
- def start_scheduler(self):
- """Start the health check scheduler"""
- if not config.health_check_enabled:
- logger.info("Health check disabled in config")
- return
-
- self.scheduler = AsyncIOScheduler()
-
- # Schedule health check using cron expression
- self.scheduler.add_job(
- self.health_check_primary_provider,
- trigger=CronTrigger.from_crontab(config.health_check_cron),
- id="claude_pro_health_check",
- name="Claude Pro Health Check",
- misfire_grace_time=60
- )
-
- self.scheduler.start()
- logger.info(f"Health check scheduler started with cron: {config.health_check_cron}")
-
- def stop_scheduler(self):
- """Stop the health check scheduler"""
- if self.scheduler:
- self.scheduler.shutdown()
- logger.info("Health check scheduler stopped")
-
-# Initialize router
-router = ClaudeRouter()
-
-@asynccontextmanager
-async def lifespan(app: FastAPI):
- logger.info("Claude Router starting up...")
- logger.info(f"Current provider: {router.current_provider}")
-
- # Start health check scheduler
- router.start_scheduler()
-
- yield
-
- # Stop scheduler on shutdown
- router.stop_scheduler()
- logger.info("Claude Router shutting down...")
-
-app = FastAPI(
- title="Claude Router",
- description="Smart router for Claude API with automatic failover",
- version="1.0.0",
- lifespan=lifespan
-)
-
-@app.get("/health")
-async def health_check():
- """Health check endpoint"""
- return {
- "status": "healthy",
- "current_provider": router.current_provider,
- "failover_count": router.failover_count,
- "last_failover": router.last_failover.isoformat() if router.last_failover else None,
- "providers": {
- name: {"active": provider_config["active"]}
- for name, provider_config in router.providers.items()
- },
- "last_health_check": router.last_health_check.isoformat() if router.last_health_check else None,
- "health_check_failures": router.health_check_failures
- }
-
-@app.post("/v1/messages")
-async def create_message(request: Request):
- """Handle Claude API message creation with failover"""
- try:
- request_data = await request.json()
- stream = request_data.get("stream", False)
-
- if stream:
- # Handle streaming response
- async def generate_stream():
- try:
- response = await router.make_request(request_data)
- for chunk in response:
- yield f"data: {json.dumps(chunk.model_dump())}\n\n"
- yield "data: [DONE]\n\n"
- except Exception as e:
- error_data = {"error": str(e)}
- yield f"data: {json.dumps(error_data)}\n\n"
-
- return StreamingResponse(
- generate_stream(),
- media_type="text/event-stream",
- headers={
- "Cache-Control": "no-cache",
- "Connection": "keep-alive"
- }
- )
- else:
- # Handle non-streaming response
- response = await router.make_request(request_data)
-
- # Handle different response types
- if hasattr(response, 'model_dump'):
- # Anthropic response
- return response.model_dump()
- elif isinstance(response, dict):
- # OpenAI-compatible response (already a dict)
- return response
- else:
- # Fallback
- return response
-
- except Exception as e:
- logger.error(f"Request processing failed: {str(e)}")
- raise HTTPException(status_code=500, detail=str(e))
-
-@app.post("/v1/switch-provider")
-async def switch_provider(request: Request):
- """Manually switch to a specific provider"""
- try:
- request_data = await request.json()
- provider = request_data.get("provider") if isinstance(request_data, dict) else request_data
- except Exception:
- raise HTTPException(status_code=422, detail="Invalid JSON payload. Expected: {'provider': 'provider_name'}")
-
- if provider not in router.providers:
- raise HTTPException(status_code=400, detail=f"Unknown provider: {provider}")
-
- if not router.providers[provider]["active"]:
- raise HTTPException(status_code=400, detail=f"Provider {provider} is not active")
-
- old_provider = router.current_provider
- router.current_provider = provider
-
- logger.info(f"Manually switched from {old_provider} to {provider}")
-
- return {
- "message": f"Switched from {old_provider} to {provider}",
- "current_provider": router.current_provider
- }
-
-@app.get("/v1/status")
-async def get_status():
- """Get current router status"""
- return {
- "current_provider": router.current_provider,
- "failover_count": router.failover_count,
- "last_failover": router.last_failover.isoformat() if router.last_failover else None,
- "last_health_check": router.last_health_check.isoformat() if router.last_health_check else None,
- "health_check_failures": router.health_check_failures,
- "providers": router.providers
- }
-
-@app.post("/v1/health-check")
-async def manual_health_check():
- """Manually trigger Claude Pro health check"""
- try:
- await router.health_check_primary_provider()
- return {
- "message": "Health check completed",
- "current_provider": router.current_provider,
- "last_health_check": router.last_health_check.isoformat() if router.last_health_check else None
- }
- except Exception as e:
- raise HTTPException(status_code=500, detail=f"Health check failed: {str(e)}")
-
-if __name__ == "__main__":
- import uvicorn
- uvicorn.run(app, host=config.host, port=config.port)
\ No newline at end of file
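The `switch_provider` handler above accepts either an object with a `provider` field or a bare JSON string, so both of the following calls are valid against this version of the router:

```bash
# Object form
curl -X POST http://localhost:8000/v1/switch-provider \
  -H "Content-Type: application/json" \
  -d '{"provider": "deepseek"}'

# Bare-string form, as used in the README examples
curl -X POST http://localhost:8000/v1/switch-provider \
  -H "Content-Type: application/json" \
  -d '"kimi"'
```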
diff --git a/router/router/config.py b/router/router/config.py
deleted file mode 100644
index bd420c6..0000000
--- a/router/router/config.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import os
-from typing import Optional
-from pydantic import BaseModel
-
-class Config(BaseModel):
- # Claude API configurations
- claude_pro_api_key: str = ""
- claude_api_key: str = ""
- deepseek_api_key: str = ""
- kimi_api_key: str = ""
-
- # Router settings
- port: int = 8000
- host: str = "0.0.0.0"
-
- # Retry settings
- max_retries: int = 3
- retry_delay: float = 1.0
-
- # API endpoints
- claude_pro_base_url: str = "https://api.anthropic.com" # Claude Pro might use different endpoint in future
- claude_api_base_url: str = "https://api.anthropic.com"
- deepseek_base_url: str = "https://api.deepseek.com"
- kimi_base_url: str = "https://api.moonshot.ai"
-
- # Health check settings
- health_check_enabled: bool = True
- health_check_cron: str = "0-4 * * * *" # Every hour, first 5 minutes
- health_check_message: str = "ping"
- health_check_model: str = "claude-3-haiku-20240307" # Use cheapest model for checks
- deepseek_health_check_model: str = "deepseek-reasoner"
- kimi_health_check_model: str = "Kimi-K2-Instruct"
-
- def __init__(self, **kwargs):
- super().__init__(**kwargs)
- # Load from environment or token file
- self.load_from_env()
-
- def load_from_env(self):
- """Load configuration from environment variables or token file"""
- # Try environment variables first
- self.claude_api_key = os.getenv("CLAUDE_API_KEY", "")
- self.deepseek_api_key = os.getenv("DEEPSEEK_API_KEY", "")
- self.kimi_api_key = os.getenv("KIMI_API_KEY", "")
-
- # Load from tokens.txt if not found in env
- if not self.claude_api_key or not self.deepseek_api_key or not self.kimi_api_key:
- try:
- with open("/home/will/docker/tokens.txt", "r") as f:
- for line in f:
- if line.startswith("claude_API="):
- self.claude_api_key = line.split("=", 1)[1].strip()
- elif line.startswith("deepseek_API="):
- self.deepseek_api_key = line.split("=", 1)[1].strip()
- elif line.startswith("kimi_API="):
- self.kimi_api_key = line.split("=", 1)[1].strip()
- except FileNotFoundError:
- pass
-
- # Claude Pro uses the same API key as regular Claude API
- # but may use different endpoint or have different rate limits
- self.claude_pro_api_key = self.claude_api_key
-
-# Global config instance
-config = Config()
\ No newline at end of file
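`load_from_env` matches lines in `tokens.txt` by their `key=` prefixes, so this version of the config needs three such lines for full provider coverage. A sketch of the expected format, with placeholder values:

```bash
# Write /home/will/docker/tokens.txt in the format config.py parses
cat > /home/will/docker/tokens.txt <<'EOF'
claude_API=sk-ant-placeholder
deepseek_API=sk-placeholder
kimi_API=sk-placeholder
EOF
```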
diff --git a/router/router/docker-compose.yml b/router/router/docker-compose.yml
deleted file mode 100644
index cd212b3..0000000
--- a/router/router/docker-compose.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-version: '3.8'
-
-services:
- claude-router:
- build: .
- container_name: claude-router
- ports:
- - "8000:8000"
- environment:
- - CLAUDE_API_KEY=${CLAUDE_API_KEY}
- volumes:
- - /home/will/docker/tokens.txt:/home/will/docker/tokens.txt:ro
- restart: unless-stopped
- networks:
- - router-network
- healthcheck:
- test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
- interval: 30s
- timeout: 10s
- retries: 3
- start_period: 30s
-
-networks:
- router-network:
- driver: bridge
\ No newline at end of file
diff --git a/router/router/requirements.txt b/router/router/requirements.txt
deleted file mode 100644
index 26138b8..0000000
--- a/router/router/requirements.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-fastapi==0.104.1
-uvicorn==0.24.0
-httpx==0.25.2
-pydantic==2.5.0
-anthropic==0.7.8
-python-dotenv==1.0.0
-apscheduler==3.10.4
\ No newline at end of file
diff --git a/router/vault/docker-compose.yml b/router/vault/docker-compose.yml
deleted file mode 100644
index d408630..0000000
--- a/router/vault/docker-compose.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-services:
- vaultwarden:
- image: vaultwarden/server:latest
- container_name: vaultwarden
- restart: unless-stopped
- environment:
- - WEBSOCKET_ENABLED=true
- - SIGNUPS_ALLOWED=true
- - INVITATIONS_ALLOWED=true
- - ADMIN_TOKEN=U2WwYJYRprFMpqxZdTpj6afU8VfBoGU0JSLvHE30WkbNMpAijHccDU1GPEI0/Bff
- - DOMAIN=https://vault.will123song.xyz
- volumes:
- - ./vw-data:/data
- networks:
- - caddy_caddy-network
-
-networks:
- caddy_caddy-network:
- external: true
\ No newline at end of file
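The `ADMIN_TOKEN` above gates the Vaultwarden admin panel, so it should be a long random value rather than anything memorable; one common way to generate one (newer Vaultwarden releases also accept an Argon2 hash here):

```bash
# Generate a 48-byte random admin token
openssl rand -base64 48
```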
diff --git a/shutdown_docker_services.sh b/shutdown_docker_services.sh
new file mode 100755
index 0000000..cb295a9
--- /dev/null
+++ b/shutdown_docker_services.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+# Pre-shutdown stop script for Docker services
+# Gracefully stops all Docker services before a system reboot/shutdown
+
+echo "$(date): Stopping Docker services..." | tee -a /var/log/docker-shutdown.log
+
+# List of Docker service directories (everything except gitea)
+DOCKER_DIRS=(
+ "/home/will/docker/caddy"
+ "/home/will/docker/navidrome"
+ "/home/will/docker/jellyfin"
+ "/home/will/docker/1panel"
+ "/home/will/docker/silly"
+ "/home/will/docker/vault"
+ "/home/will/docker/HA"
+ "/home/will/docker/clove"
+ "/home/will/docker/discord_bot"
+ "/home/will/docker/discord_bot/ai_bots"
+ "/home/will/docker/adwireguard"
+ # "/home/will/docker/backtest" # started manually
+)
+
+# Function: gracefully stop a Docker Compose service
+stop_docker_service() {
+ local dir="$1"
+
+ if [[ -f "$dir/docker-compose.yml" ]]; then
+ echo "$(date): Stopping service: $dir" | tee -a /var/log/docker-shutdown.log
+ cd "$dir" || return
+
+ # Use timeout so a stuck service cannot hang the shutdown
+ timeout 60 docker compose down
+
+ if [[ $? -eq 0 ]]; then
+ echo "$(date): Stopped successfully: $dir" | tee -a /var/log/docker-shutdown.log
+ else
+ echo "$(date): Stop failed or timed out: $dir" | tee -a /var/log/docker-shutdown.log
+ # Force stop
+ docker compose kill
+ fi
+ else
+ echo "$(date): Skipping (no docker-compose.yml): $dir" | tee -a /var/log/docker-shutdown.log
+ fi
+}
+
+# Stop all services
+for dir in "${DOCKER_DIRS[@]}"; do
+ if [[ -d "$dir" ]]; then
+ stop_docker_service "$dir"
+ else
+ echo "$(date): Directory does not exist: $dir" | tee -a /var/log/docker-shutdown.log
+ fi
+
+ # Wait 2 seconds between services
+ sleep 2
+done
+
+# Clean up orphaned containers and networks
+echo "$(date): Cleaning up orphaned containers and networks..." | tee -a /var/log/docker-shutdown.log
+docker container prune -f
+docker network prune -f
+
+# Stop the Docker daemon (optional)
+echo "$(date): Stopping the Docker daemon..." | tee -a /var/log/docker-shutdown.log
+systemctl stop docker
+
+echo "$(date): Docker services stopped" | tee -a /var/log/docker-shutdown.log
\ No newline at end of file
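Because the script ends by stopping the Docker daemon itself, a supervised manual run is worth doing before hooking it into shutdown. A sketch:

```bash
# Run once by hand and watch the log it appends to
sudo ./shutdown_docker_services.sh
tail -n 20 /var/log/docker-shutdown.log

# Bring the daemon back up afterwards
sudo systemctl start docker
```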
diff --git a/startup_docker_services.sh b/startup_docker_services.sh
new file mode 100755
index 0000000..ed09727
--- /dev/null
+++ b/startup_docker_services.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+# Post-boot startup script for Docker services
+# Automatically starts all Docker services after the system boots
+
+echo "$(date): Starting Docker services..." | tee -a /var/log/docker-startup.log
+
+# Wait for the Docker daemon to come up
+echo "$(date): Waiting for the Docker daemon..." | tee -a /var/log/docker-startup.log
+while ! docker info >/dev/null 2>&1; do
+ echo "$(date): Docker daemon not ready yet, waiting 5 seconds..." | tee -a /var/log/docker-startup.log
+ sleep 5
+done
+
+echo "$(date): Docker daemon is ready" | tee -a /var/log/docker-startup.log
+
+# List of Docker service directories (in priority order)
+DOCKER_DIRS=(
+ "/home/will/docker/caddy" # reverse proxy, starts first
+ "/home/will/docker/vault" # password manager
+ "/home/will/docker/HA" # Home Assistant
+ "/home/will/docker/clove" # file management
+ "/home/will/docker/navidrome" # music server
+ "/home/will/docker/jellyfin" # media server
+ "/home/will/docker/1panel" # admin panel
+ "/home/will/docker/silly" # AI chat
+ "/home/will/docker/discord_bot" # main Discord bot
+ "/home/will/docker/discord_bot/ai_bots" # AI bots (if needed)
+ "/home/will/docker/adwireguard" # VPN service
+ # "/home/will/docker/backtest" # backtesting - started manually
+)
+
+# Function: start a Docker Compose service
+start_docker_service() {
+ local dir="$1"
+
+ if [[ -f "$dir/docker-compose.yml" ]]; then
+ echo "$(date): Starting service: $dir" | tee -a /var/log/docker-startup.log
+ cd "$dir" || return
+
+ # Use timeout so a stuck service cannot hang the boot; adwireguard needs extra time
+ if [[ "$dir" == *"adwireguard"* ]]; then
+ timeout 180 docker compose up -d
+ else
+ timeout 120 docker compose up -d
+ fi
+
+ if [[ $? -eq 0 ]]; then
+ echo "$(date): Started successfully: $dir" | tee -a /var/log/docker-startup.log
+ else
+ echo "$(date): Start failed or timed out: $dir" | tee -a /var/log/docker-startup.log
+ fi
+ else
+ echo "$(date): Skipping (no docker-compose.yml): $dir" | tee -a /var/log/docker-startup.log
+ fi
+}
+
+# Start all services
+for dir in "${DOCKER_DIRS[@]}"; do
+ if [[ -d "$dir" ]]; then
+ start_docker_service "$dir"
+ else
+ echo "$(date): Directory does not exist: $dir" | tee -a /var/log/docker-startup.log
+ fi
+
+ # Wait 5 seconds between services to give each time to start
+ sleep 5
+done
+
+# Clean up unused resources
+echo "$(date): Cleaning up unused Docker resources..." | tee -a /var/log/docker-startup.log
+docker image prune -f
+
+echo "$(date): Docker services started" | tee -a /var/log/docker-startup.log
+
+# Show running status
+echo "$(date): Currently running containers:" | tee -a /var/log/docker-startup.log
+docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | tee -a /var/log/docker-startup.log
\ No newline at end of file
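Neither script runs on its own at boot or shutdown; one way to wire them together is a oneshot systemd unit whose `ExecStart` runs the startup script and whose `ExecStop` runs the shutdown script. A sketch under that assumption (the unit name is illustrative, and the shutdown script's final `systemctl stop docker` may be redundant under systemd's own ordering):

```bash
# Hypothetical unit: /etc/systemd/system/docker-services.service
sudo tee /etc/systemd/system/docker-services.service >/dev/null <<'EOF'
[Unit]
Description=Homelab Docker compose services
After=docker.service network-online.target
Requires=docker.service

[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/home/will/docker/startup_docker_services.sh
ExecStop=/home/will/docker/shutdown_docker_services.sh
TimeoutStartSec=900
TimeoutStopSec=600

[Install]
WantedBy=multi-user.target
EOF

sudo systemctl daemon-reload
sudo systemctl enable docker-services.service
```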