Migrating from Dropbox to Baidu Netdisk (百度网盘)
2025-09-23 · 2 min read
First, sync the Dropbox files to the Ubuntu server
Install Dropbox
# Quote the URL so the shell does not mangle the "?", and give the file a sensible name
wget -O dropbox_2025.05.20_amd64.deb "https://www.dropbox.com/download?dl=packages/ubuntu/dropbox_2025.05.20_amd64.deb"
sudo apt install -y ./dropbox_2025.05.20_amd64.deb
# On the first run the terminal prints a login link; open it in a browser to link this machine
~/.dropbox-dist/dropboxd
Configure the systemd service
# root@disk:~/Dropbox# cat /etc/systemd/system/dropbox.service
[Unit]
Description=Dropbox Daemon
After=network-online.target
Wants=network-online.target

[Service]
# Read the proxy variables from an external file
EnvironmentFile=/etc/dropbox/proxy.env
# %h resolves to /root for system units, i.e. the home directory where dropboxd was installed
ExecStart=%h/.dropbox-dist/dropboxd
WorkingDirectory=%h
Restart=on-failure

[Install]
WantedBy=default.target
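The unit pulls its proxy settings from /etc/dropbox/proxy.env. A minimal sketch of that file, assuming an HTTP proxy on 127.0.0.1:7890 (the variable names and the address are placeholders; use whatever your proxy needs):
# /etc/dropbox/proxy.env (hypothetical values; match your own proxy)
HTTP_PROXY=http://127.0.0.1:7890
HTTPS_PROXY=http://127.0.0.1:7890
http_proxy=http://127.0.0.1:7890
https_proxy=http://127.0.0.1:7890
NO_PROXY=localhost,127.0.0.1,::1
Then reload systemd and enable the unit:
systemctl daemon-reload
systemctl enable --now dropbox.service
systemctl status dropbox.service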
The Dropbox CLI helper script
wget -O ~/dropbox.py "https://www.dropbox.com/download?dl=packages/dropbox.py"
chmod +x ~/dropbox.py
~/dropbox.py status      # show sync status
~/dropbox.py filestatus  # show per-file status
~/dropbox.py start       # start Dropbox
~/dropbox.py stop        # stop Dropbox
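Before pushing anything to Baidu Netdisk it is usually worth waiting for Dropbox to finish syncing. A minimal sketch using the status command above; it assumes the client reports the English string "Up to date", so adjust the match if your locale differs:
# wait until Dropbox reports it is fully synced (hypothetical helper loop)
until ~/dropbox.py status | grep -q "Up to date"; do
    echo "Still syncing: $(~/dropbox.py status | head -n 1)"
    sleep 30
done
echo "Dropbox is up to date."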
Upload files from the Ubuntu server to Baidu Netdisk
Download the BaiduPCS-Go CLI
cd /usr/local/bin
# Download the linux_amd64 archive from the latest GitHub release of "BaiduPCS-Go" and unpack it here
# Assume the resulting executable is named BaiduPCS-Go
chmod +x BaiduPCS-Go
# Log in (first time only)
# The terminal prints a login link or QR-code instructions; finish the login in a browser as prompted
BaiduPCS-Go login
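Once the login succeeds, sanity-check the session before wiring up any automation; who, quota and ls are subcommands listed in the BaiduPCS-Go README, but verify them against the release you downloaded:
BaiduPCS-Go who       # show the logged-in account
BaiduPCS-Go quota     # show total and used space
BaiduPCS-Go ls /      # list the root of the netdisk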
Configure the upload script, systemd service, and timer
# cat /usr/local/bin/baidupcs-upload-loop.sh
#!/usr/bin/env bash
set -Eeuo pipefail

# === Tunables ===
SRC="/root/Dropbox/"
DST="/Dropbox/"
PARALLEL=1        # -p 1: keep uploads serial to help avoid concurrency-triggered bugs
MAX_RETRY=0       # 0 = retry forever; set to e.g. 100 to cap retries
BASE_SLEEP=10     # seconds to wait before the first retry
MAX_SLEEP=300     # maximum backoff in seconds
LOG="/var/log/baidupcs-upload.log"

# Allow only one running instance
exec 9>/var/run/baidupcs-upload.lock
flock -n 9 || { echo "Another upload is running, exit."; exit 0; }

# Raise the file-descriptor limit so large directory walks don't stall
ulimit -n 1048576 || true

# Optional: export proxy settings for this script (if you need them)
# export http_proxy=...
# export https_proxy=...
# export HTTP_PROXY=...
# export HTTPS_PROXY=...
# export NO_PROXY=localhost,127.0.0.1,::1

attempt=0
while :; do
  ts="$(date '+%F %T')"
  echo "[$ts] Start upload attempt #$((attempt+1))" | tee -a "$LOG"

  # The actual upload. Add --nocheck to skip already-uploaded files faster
  # (only if your BaiduPCS-Go version supports it; otherwise leave it out).
  # "|| rc=$?" keeps a failed upload from killing the script under set -e.
  rc=0
  /usr/local/bin/BaiduPCS-Go upload -p "$PARALLEL" "$SRC" "$DST" \
    >>"$LOG" 2>&1 || rc=$?

  # Common case: everything uploaded / already exists -> exit code 0, stop looping
  if [[ $rc -eq 0 ]]; then
    echo "[$(date '+%F %T')] Upload finished successfully. Exit." | tee -a "$LOG"
    exit 0
  fi

  attempt=$((attempt+1))
  if [[ $MAX_RETRY -ne 0 && $attempt -ge $MAX_RETRY ]]; then
    echo "[$(date '+%F %T')] Reached max retries ($MAX_RETRY). Exit with rc=$rc" | tee -a "$LOG"
    exit "$rc"
  fi

  # Exponential backoff before retrying (crashes, network hiccups and server-side 5xx all land here);
  # cap the shift so the arithmetic cannot overflow on long retry runs
  shift_n=$(( attempt - 1 ))
  (( shift_n > 10 )) && shift_n=10
  sleep_sec=$(( BASE_SLEEP << shift_n ))
  (( sleep_sec > MAX_SLEEP )) && sleep_sec=$MAX_SLEEP
  echo "[$(date '+%F %T')] Upload failed (rc=$rc). Retry in ${sleep_sec}s..." | tee -a "$LOG"
  sleep "$sleep_sec"
done
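Make the script executable and give it one manual run before handing it to systemd:
chmod +x /usr/local/bin/baidupcs-upload-loop.sh
bash -n /usr/local/bin/baidupcs-upload-loop.sh    # syntax check only
/usr/local/bin/baidupcs-upload-loop.sh            # run once in the foreground
tail -f /var/log/baidupcs-upload.log              # watch progress from another shell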
# cat /etc/systemd/system/baidupcs-upload.timer
[Unit]
Description=Run baidupcs-upload every 10 minutes

[Timer]
OnBootSec=5min
OnUnitActiveSec=10min
Persistent=true
Unit=baidupcs-upload.service

[Install]
WantedBy=timers.target
# cat /etc/systemd/system/baidupcs-upload.service
[Unit]
Description=Resilient BaiduPCS-Go upload loop (/root/Dropbox/ -> /Dropbox/)
Wants=network-online.target
After=network-online.target

[Service]
Type=simple
User=root
ExecStart=/usr/local/bin/baidupcs-upload-loop.sh
# on-failure rather than always: the script exits 0 once everything is uploaded,
# and the timer will start it again on the next tick anyway
Restart=on-failure
RestartSec=5s
# Collect all output in a single log file (or hand it to journald instead)
StandardOutput=append:/var/log/baidupcs-upload.log
StandardError=append:/var/log/baidupcs-upload.log
# Lower the resource priority
Nice=10
IOSchedulingClass=best-effort
IOSchedulingPriority=7
# Guard against the log growing without bound: pair the file with logrotate,
# or drop the two Standard*= lines above and let journald manage the output
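With both unit files in place, enable the timer (the service itself does not need enabling; the timer starts it):
systemctl daemon-reload
systemctl enable --now baidupcs-upload.timer
systemctl list-timers baidupcs-upload.timer    # confirm the next scheduled run
journalctl -u baidupcs-upload.service -f       # or: tail -f /var/log/baidupcs-upload.log
Since the unit appends everything to /var/log/baidupcs-upload.log, a small logrotate rule keeps the file bounded. A sketch with assumed size and retention values:
# /etc/logrotate.d/baidupcs-upload (hypothetical size/retention; tune to taste)
/var/log/baidupcs-upload.log {
    size 50M
    rotate 4
    compress
    missingok
    notifempty
    copytruncate
}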