From 2983078c8172a30eee48219dd3cff8fc7e92fa1a Mon Sep 17 00:00:00 2001
From: Will Song
Date: Wed, 23 Jul 2025 00:00:01 -0500
Subject: [PATCH] Automated backup - 20250723_000001

---
 caddy/Caddyfile                        |   6 +-
 gemini-balance                         |   1 +
 simple-gemini-proxy/.env.example       |   8 ++
 simple-gemini-proxy/PROJECT_SUMMARY.md |  93 ++++++++++++++++
 simple-gemini-proxy/QUICKSTART.md      |  66 ++++++++++++
 simple-gemini-proxy/README.md          | 142 +++++++++++++++++++++++++
 simple-gemini-proxy/check-status.sh    |  51 +++++++++
 simple-gemini-proxy/docker-compose.yml |  18 ++++
 simple-gemini-proxy/simple-test.sh     |  49 +++++++++
 simple-gemini-proxy/test-api.sh        |  60 +++++++++++
 tokens.txt                             |   1 +
 11 files changed, 494 insertions(+), 1 deletion(-)
 create mode 160000 gemini-balance
 create mode 100644 simple-gemini-proxy/.env.example
 create mode 100644 simple-gemini-proxy/PROJECT_SUMMARY.md
 create mode 100644 simple-gemini-proxy/QUICKSTART.md
 create mode 100644 simple-gemini-proxy/README.md
 create mode 100755 simple-gemini-proxy/check-status.sh
 create mode 100644 simple-gemini-proxy/docker-compose.yml
 create mode 100755 simple-gemini-proxy/simple-test.sh
 create mode 100755 simple-gemini-proxy/test-api.sh

diff --git a/caddy/Caddyfile b/caddy/Caddyfile
index bafc3f7..9b72e4d 100644
--- a/caddy/Caddyfile
+++ b/caddy/Caddyfile
@@ -31,6 +31,10 @@ clove.will123song.xyz {
 	reverse_proxy clove:5201
 }
 
+gemeni-balance.will123song.xyz {
+	reverse_proxy gemini-balance:8000
+}
+
 ha.will123song.xyz, homeassistant.fossa-dinosaur.ts.net {
 	reverse_proxy 192.168.50.92:8123
 }
@@ -42,5 +46,5 @@ ha.will123song.xyz, homeassistant.fossa-dinosaur.ts.net {
 
 # Default site
 will123song.xyz, www.will123song.xyz {
-	respond "Welcome to Will's Server! 🚀\n\nServices Available:\n- Jellyfin: http://localhost:8096\n- Portainer: http://localhost:9000\n- qBittorrent: http://localhost:18080\n- Vaultwarden: http://localhost:8081\n- AdGuard: http://localhost:3000\n- Gitea: http://localhost:13000\n- SillyTavern: http://localhost:8000\n- Home Assistant: http://localhost:8123"
+	respond "Welcome to Will's Server! 🚀\n\nServices Available:\n- Jellyfin: http://localhost:8096\n- Portainer: http://localhost:9000\n- qBittorrent: http://localhost:18080\n- Vaultwarden: http://localhost:8081\n- AdGuard: http://localhost:3000\n- Gitea: http://localhost:13000\n- SillyTavern: http://localhost:8000\n- Home Assistant: http://localhost:8123\n- Gemini Balance: http://localhost:8001"
 }
\ No newline at end of file
diff --git a/gemini-balance b/gemini-balance
new file mode 160000
index 0000000..b25cf7d
--- /dev/null
+++ b/gemini-balance
@@ -0,0 +1 @@
+Subproject commit b25cf7d978f708a7360d33cbb788e3546705ab9b
diff --git a/simple-gemini-proxy/.env.example b/simple-gemini-proxy/.env.example
new file mode 100644
index 0000000..b7a1a4f
--- /dev/null
+++ b/simple-gemini-proxy/.env.example
@@ -0,0 +1,8 @@
+# Gemini API key - get one from https://ai.google.dev
+GEMINI_API_KEY=your_gemini_api_key_here
+
+# Optional settings
+DISABLE_MODEL_MAPPING=0
+GPT_4=gemini-1.5-pro-latest
+GPT_3_5_TURBO=gemini-1.5-flash-latest
+GPT_4_VISION_PREVIEW=gemini-1.5-flash-latest
\ No newline at end of file
diff --git a/simple-gemini-proxy/PROJECT_SUMMARY.md b/simple-gemini-proxy/PROJECT_SUMMARY.md
new file mode 100644
index 0000000..a1da012
--- /dev/null
+++ b/simple-gemini-proxy/PROJECT_SUMMARY.md
@@ -0,0 +1,93 @@
+# Simple Gemini Proxy - Project Summary
+
+## 🎯 Project Goals Status
+
+✅ **No database required** - fully stateless service, no database setup of any kind
+✅ **Extremely simple configuration** - only a Gemini API key is needed
+✅ **Docker deployment** - one-command startup using the official Docker image
+✅ **Works immediately** - no complex configuration, ready out of the box
+
+## 📁 Project Structure
+
+```
+/home/will/docker/simple-gemini-proxy/
+├── docker-compose.yml     # Docker Compose file
+├── README.md              # Detailed documentation
+├── QUICKSTART.md          # Quick start guide
+├── .env.example           # Environment variable example
+├── simple-test.sh         # Simple test script
+├── test-api.sh            # Full API test script
+├── check-status.sh        # Service status check script
+└── PROJECT_SUMMARY.md     # Project summary (this file)
+```
+
+## 🚀 Usage
+
+### 1. Start the service
+```bash
+cd /home/will/docker/simple-gemini-proxy
+docker compose up -d
+```
+
+### 2. Test the service
+```bash
+# Use your Gemini API key
+./simple-test.sh YOUR_GEMINI_API_KEY
+```
+
+### 3. Check status
+```bash
+./check-status.sh
+```
+
+## 🔧 Technical Details
+
+- **Base image**: `zhu327/gemini-openai-proxy:latest`
+- **Service port**: 8081 (mapped to port 8080 inside the container)
+- **API compatibility**: fully compatible with the OpenAI API format
+- **Model mapping**: GPT models are mapped to Gemini models automatically
+
+## 📋 Supported Features
+
+- ✅ OpenAI ChatCompletion API format
+- ✅ Streaming responses
+- ✅ Multiple model mappings
+- ✅ Automatic restart
+- ✅ Health checks
+
+## 🔑 Model Mapping
+
+| OpenAI model | Gemini model |
+|------------|------------|
+| gpt-3.5-turbo | gemini-1.5-flash-latest |
+| gpt-4 | gemini-1.5-pro-latest |
+| gpt-4-vision-preview | gemini-1.5-flash-latest |
+
+## 💡 Use Cases
+
+This proxy is especially useful for:
+
+1. **Migrating existing OpenAI apps** - switch to Gemini without changing any code
+2. **Development and testing** - spin up a test environment quickly
+3. **API unification** - a single interface in front of different AI services
+4. **Cost optimization** - use the free Gemini API instead of the paid OpenAI API
+
+## 🎉 Project Advantages
+
+1. **Minimal deployment** - one command starts the whole service
+2. **Zero configuration** - nothing to configure beyond the API key
+3. **Plug and play** - accepts OpenAI-format requests right away
+4. **Production ready** - includes health checks and automatic restart
+5. **Complete documentation** - detailed usage instructions and test scripts
+
+## 🔗 Links
+
+- **Get an API key**: https://ai.google.dev
+- **Upstream project**: https://github.com/zhu327/gemini-openai-proxy
+- **Docker Hub**: https://hub.docker.com/r/zhu327/gemini-openai-proxy
+
+## ✨ Deployment Successful!
+
+The project is deployed at `/home/will/docker/simple-gemini-proxy/`, and the service runs at `http://localhost:8081`.
+
+All you need is a Gemini API key to start using it!
\ No newline at end of file
diff --git a/simple-gemini-proxy/QUICKSTART.md b/simple-gemini-proxy/QUICKSTART.md
new file mode 100644
index 0000000..e027efc
--- /dev/null
+++ b/simple-gemini-proxy/QUICKSTART.md
@@ -0,0 +1,66 @@
+# Quick Start Guide
+
+## Deploy in 1 Minute
+
+### Step 1: Get an API key
+Visit [Google AI Studio](https://ai.google.dev) and create a free Gemini API key
+
+### Step 2: Start the service
+```bash
+cd /home/will/docker/simple-gemini-proxy
+docker compose up -d
+```
+
+### Step 3: Test the service
+Replace `YOUR_API_KEY` with your API key, then run:
+
+```bash
+# Basic test
+curl http://localhost:8081/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -H "Authorization: Bearer YOUR_API_KEY" \
+  -d '{
+    "model": "gpt-3.5-turbo",
+    "messages": [{"role": "user", "content": "Hello!"}]
+  }'
+```
+
+### Use the bundled test script
+```bash
+./test-api.sh YOUR_API_KEY
+```
+
+## Service Information
+
+- **Service address**: http://localhost:8081
+- **OpenAI-compatible endpoint**: http://localhost:8081/v1/chat/completions
+- **Container name**: simple-gemini-proxy
+- **Image**: zhu327/gemini-openai-proxy:latest
+
+## Common Commands
+
+```bash
+# Check service status
+docker compose ps
+
+# View logs
+docker compose logs -f
+
+# Stop the service
+docker compose down
+
+# Restart the service
+docker compose restart
+```
+
+## Supported Model Mappings
+
+| OpenAI model | Gemini model |
+|------------|------------|
+| gpt-3.5-turbo | gemini-1.5-flash-latest |
+| gpt-4 | gemini-1.5-pro-latest |
+| gpt-4-vision-preview | gemini-1.5-flash-latest |
+
+## That's It!
+
+The proxy now accepts any request in OpenAI API format and forwards it to the Google Gemini API. No database, no complex configuration - fully plug and play!
\ No newline at end of file
diff --git a/simple-gemini-proxy/README.md b/simple-gemini-proxy/README.md
new file mode 100644
index 0000000..fab5103
--- /dev/null
+++ b/simple-gemini-proxy/README.md
@@ -0,0 +1,142 @@
+# Simple Gemini to OpenAI API Proxy
+
+An extremely simple proxy service that exposes Gemini through the OpenAI API format, based on the `zhu327/gemini-openai-proxy` project.
+
+## Features
+
+- ✅ No database required
+- ✅ Extremely simple configuration - only an API key is needed
+- ✅ Docker deployment
+- ✅ Ready to use immediately, no complex configuration
+- ✅ Call Gemini models using the OpenAI API format
+
+## Quick Start
+
+### 1. Get a Gemini API key
+
+Visit [Google AI Studio](https://ai.google.dev) to get a free Gemini API key.
+
+### 2. Start the service
+
+```bash
+# Start the proxy service
+docker-compose up -d
+
+# View logs
+docker-compose logs -f
+```
+
+### 3. Test the service
+
+```bash
+# Replace YOUR_GEMINI_API_KEY with your Gemini API key
+curl http://localhost:8081/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -H "Authorization: Bearer YOUR_GEMINI_API_KEY" \
+  -d '{
+    "model": "gpt-3.5-turbo",
+    "messages": [{"role": "user", "content": "Hello!"}],
+    "temperature": 0.7
+  }'
+```
+
+## Usage
+
+### API Endpoints
+
+- **Base URL**: `http://localhost:8081`
+- **Chat completions**: `POST /v1/chat/completions`
+
+### Model Mapping
+
+Default model mapping:
+- `gpt-3.5-turbo` → `gemini-1.5-flash-latest`
+- `gpt-4` → `gemini-1.5-pro-latest`
+- `gpt-4-vision-preview` → `gemini-1.5-flash-latest`
+
+### Environment Variables
+
+The following can be set in `docker-compose.yml`:
+
+- `DISABLE_MODEL_MAPPING=1`: disable model mapping and use Gemini model names directly
+- `GPT_4=gemini-1.5-pro-latest`: customize the model mapping
+
+## Integrating with Clients
+
+### Using curl
+
+```bash
+curl http://localhost:8081/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -H "Authorization: Bearer YOUR_GEMINI_API_KEY" \
+  -d '{
+    "model": "gpt-3.5-turbo",
+    "messages": [{"role": "user", "content": "Hello!"}]
+  }'
+```
+
+### Using the Python OpenAI library
+
+```python
+import openai
+
+client = openai.OpenAI(
+    api_key="YOUR_GEMINI_API_KEY",
+    base_url="http://localhost:8081/v1"
+)
+
+response = client.chat.completions.create(
+    model="gpt-3.5-turbo",
+    messages=[{"role": "user", "content": "Hello!"}]
+)
+
+print(response.choices[0].message.content)
+```
+
+## Management Commands
+
+```bash
+# Start the service
+docker-compose up -d
+
+# Stop the service
+docker-compose down
+
+# Restart the service
+docker-compose restart
+
+# Check status
+docker-compose ps
+
+# View logs
+docker-compose logs -f gemini-proxy
+```
+
+## Notes
+
+1. **API key security**: keep your Gemini API key safe
+2. **Network access**: the service binds to port 8081 by default; configure your firewall if external access is required
+3. **Rate limits**: observe the Google Gemini API rate limits
+4. **Production use**: add authentication and HTTPS before using this in production
+
+## Troubleshooting
+
+### Container won't start
+```bash
+# Check whether the port is already in use
+sudo netstat -tlnp | grep 8081
+
+# View detailed error logs
+docker-compose logs gemini-proxy
+```
+
+### API calls fail
+1. Check that the API key is correct
+2. Confirm network connectivity
+3. Check that the request format follows the OpenAI API spec
+
+## Project Information
+
+- **Based on**: [zhu327/gemini-openai-proxy](https://github.com/zhu327/gemini-openai-proxy)
+- **Docker image**: `zhu327/gemini-openai-proxy:latest`
+- **Supported models**: Gemini 1.5 Pro, Gemini 1.5 Flash
\ No newline at end of file
diff --git a/simple-gemini-proxy/check-status.sh b/simple-gemini-proxy/check-status.sh
new file mode 100755
index 0000000..ca3a7ce
--- /dev/null
+++ b/simple-gemini-proxy/check-status.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+echo "🔍 Checking Gemini proxy service status..."
+echo ""
+
+# Check whether Docker is running
+if ! docker info > /dev/null 2>&1; then
+    echo "❌ Docker service is not running"
+    exit 1
+fi
+
+# Check container status
+echo "📦 Docker container status:"
+docker compose ps
+
+echo ""
+
+# Check the port
+echo "🌐 Port usage:"
+if netstat -tln | grep -q ":8081"; then
+    echo "✅ Port 8081 is listening"
+else
+    echo "❌ Port 8081 is not listening"
+fi
+
+echo ""
+
+# Check the service response
+echo "🔗 Service connectivity test:"
+if curl -s --max-time 5 http://localhost:8081/ > /dev/null; then
+    echo "✅ Service is responding"
+
+    # Capture the service response
+    response=$(curl -s http://localhost:8081/)
+    echo "📋 Service response: $response"
+else
+    echo "❌ Service is not responding"
+fi
+
+echo ""
+
+# Show the last few lines of the service logs
+echo "📝 Recent service logs:"
+docker compose logs --tail=5 gemini-proxy
+
+echo ""
+echo "🎯 Usage:"
+echo "1. Get a Gemini API key: https://ai.google.dev"
+echo "2. Run a test: ./simple-test.sh YOUR_API_KEY"
+echo "3. Service address: http://localhost:8081"
+echo "4. API endpoint: http://localhost:8081/v1/chat/completions"
\ No newline at end of file
diff --git a/simple-gemini-proxy/docker-compose.yml b/simple-gemini-proxy/docker-compose.yml
new file mode 100644
index 0000000..305f164
--- /dev/null
+++ b/simple-gemini-proxy/docker-compose.yml
@@ -0,0 +1,18 @@
+version: '3.8'
+
+services:
+  gemini-proxy:
+    image: googlegemini/proxy-to-gemini:latest
+    container_name: simple-gemini-proxy
+    ports:
+      - "8002:5555"
+    environment:
+      - GEMINI_API_KEY=${GEMINI_API_KEY}
+    restart: unless-stopped
+    networks:
+      - caddy_caddy-network
+      - default
+
+networks:
+  caddy_caddy-network:
+    external: true
\ No newline at end of file
diff --git a/simple-gemini-proxy/simple-test.sh b/simple-gemini-proxy/simple-test.sh
new file mode 100755
index 0000000..df662c6
--- /dev/null
+++ b/simple-gemini-proxy/simple-test.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+# The simplest possible test script
+# Usage: ./simple-test.sh YOUR_GEMINI_API_KEY
+
+if [ -z "$1" ]; then
+    echo "❌ Error: a Gemini API key is required"
+    echo "Usage: $0 YOUR_GEMINI_API_KEY"
+    echo ""
+    echo "Get an API key: https://ai.google.dev"
+    exit 1
+fi
+
+API_KEY=$1
+echo "🚀 Testing the simple Gemini proxy..."
+echo "📍 Service address: http://localhost:8081"
+echo "🔑 API Key: ${API_KEY:0:8}..."
+echo ""
+
+# Simple chat test
+echo "💬 Sending a test message..."
+response=$(curl -s http://localhost:8081/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -H "Authorization: Bearer $API_KEY" \
+  -d '{
+    "model": "gpt-3.5-turbo",
+    "messages": [{"role": "user", "content": "Hello! Please reply briefly."}],
+    "max_tokens": 50
+  }')
+
+echo "📥 Server response:"
+echo "$response"
+echo ""
+
+# Check whether the response contains "choices"
+if echo "$response" | grep -q '"choices"'; then
+    echo "✅ Test passed! The proxy service is working."
+else
+    echo "❌ Test failed! Possible causes:"
+    echo "   - Invalid API key"
+    echo "   - Network connectivity problems"
+    echo "   - Service did not start correctly"
+fi
+
+echo ""
+echo "🔧 Service management commands:"
+echo "   docker compose ps       # check status"
+echo "   docker compose logs -f  # view logs"
+echo "   docker compose restart  # restart the service"
\ No newline at end of file
diff --git a/simple-gemini-proxy/test-api.sh b/simple-gemini-proxy/test-api.sh
new file mode 100755
index 0000000..df46070
--- /dev/null
+++ b/simple-gemini-proxy/test-api.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+# Simple API test script
+# Usage: ./test-api.sh YOUR_GEMINI_API_KEY
+
+if [ -z "$1" ]; then
+    echo "Usage: $0 YOUR_GEMINI_API_KEY"
+    echo "Example: $0 AIzaSyDxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+    exit 1
+fi
+
+API_KEY=$1
+BASE_URL="http://localhost:8081"
+
+echo "Testing Gemini OpenAI Proxy..."
+echo "API Key: ${API_KEY:0:10}..."
+echo "Base URL: $BASE_URL"
+echo
+
+# Test basic chat completion
+echo "=== Testing basic chat completion ==="
+curl -s "$BASE_URL/v1/chat/completions" \
+  -H "Content-Type: application/json" \
+  -H "Authorization: Bearer $API_KEY" \
+  -d '{
+    "model": "gpt-3.5-turbo",
+    "messages": [{"role": "user", "content": "Hello! Please reply in Chinese."}],
+    "temperature": 0.7,
+    "max_tokens": 150
+  }' | jq '.'
+
+echo -e "\n"
+
+# Test the GPT-4 model
+echo "=== Testing the GPT-4 model ==="
+curl -s "$BASE_URL/v1/chat/completions" \
+  -H "Content-Type: application/json" \
+  -H "Authorization: Bearer $API_KEY" \
+  -d '{
+    "model": "gpt-4",
+    "messages": [{"role": "user", "content": "Explain the basic principles of quantum computing"}],
+    "temperature": 0.3,
+    "max_tokens": 200
+  }' | jq '.'
+
+echo -e "\n"
+
+# Test streaming responses
+echo "=== Testing streaming responses ==="
+curl -s "$BASE_URL/v1/chat/completions" \
+  -H "Content-Type: application/json" \
+  -H "Authorization: Bearer $API_KEY" \
+  -d '{
+    "model": "gpt-3.5-turbo",
+    "messages": [{"role": "user", "content": "Count from 1 to 10"}],
+    "stream": true,
+    "temperature": 0.1
+  }'
+
+echo -e "\n\n=== Tests complete ==="
\ No newline at end of file
diff --git a/tokens.txt b/tokens.txt
index f8c16e5..61b1970 100644
--- a/tokens.txt
+++ b/tokens.txt
@@ -14,6 +14,7 @@ alphavantage_API=H1TNEAN9JONTFCY6
 VaultWarden_ADMIN_TOKEN=U2WwYJYRprFMpqxZdTpj6afU8VfBoGU0JSLvHE30WkbNMpAijHccDU1GPEI0/Bff
 kimi_API_Base_URL=https://api.moonshot.ai/anthropic
 Gemeni_API=AIzaSyBZvm_cr5iHgPUpiDTZ7j_r4vYL8yW-tKw
+Gemeni_API2=AIzaSyBLBgpw-Gyrf3qiwa5FGCpQxWVMO5JN9Rc