A Script for Batch-Downloading SpigotMC Plugins

Preface

This article presents scripts for downloading SpigotMC plugins, run from the command line.
Prepare your Cookie in advance.
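
The `cf_clearance` value can be copied from your browser's developer tools (Application → Cookies) after you pass the Cloudflare check once. It is typically tied to the User-Agent (and often the IP) that solved the challenge, so the script must send the same User-Agent. A minimal sketch to verify the cookie still works (the URL and cookie value are placeholders):

import requests

headers = {
    'User-Agent': 'Mozilla/5.0 ...',  # must match the browser that produced the cookie
    'Cookie': 'cf_clearance=******'   # placeholder: paste your own value
}

# 200 means the cookie passed Cloudflare; 403 usually means it has expired
response = requests.get('https://www.spigotmc.org/', headers=headers)
print(response.status_code)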

Install the Python environment

pip install requests
pip install beautifulsoup4

Downloading a single plugin

import requests

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36 Edg/123.0.0.0',
    'Referer': 'history page URL',
    'Cookie': 'cf_clearance=******'
}

# Send the GET request
url = 'download URL'
response = requests.get(url, headers=headers)

if response.status_code == 200:
    # Extract the filename from the response headers
    content_disposition = response.headers.get('Content-Disposition')
    if content_disposition:
        filename = content_disposition.split('filename=')[1].strip('"')
    else:
        filename = 'downloaded_file.jar'  # fallback when the server sends no filename

    # Write the file to disk
    with open(filename, 'wb') as f:
        f.write(response.content)

    print(f"File '{filename}' downloaded.")
else:
    print(f"Download failed, HTTP status code: {response.status_code}")

Batch plugin download

Set `save_directory` to the folder where downloads are saved, and `base_url` to the version-history page URL.

import os
import requests
from bs4 import BeautifulSoup

# Request headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36 Edg/123.0.0.0',
    'Referer': 'history page URL',
    'Cookie': '******'
}

# Directory where downloads are saved
save_directory = r'folder path'

# Fetch the version-history page
base_url = 'history page URL'
response = requests.get(base_url, headers=headers)

if response.status_code != 200:
    print(f"Could not fetch the version-history page, HTTP status code: {response.status_code}")
    exit()

html_content = response.text

# Parse the HTML with BeautifulSoup to locate the per-version download links
soup = BeautifulSoup(html_content, 'html.parser')

# Every <tr> row that holds a historical version
version_rows = soup.find_all('tr', class_='dataRow')

for row in version_rows:
    # The <td> element with the version number
    version_element = row.find('td', class_='version')
    if version_element:
        version = version_element.text.strip()

        # The <td> element with the download link
        download_td = row.find('td', class_='dataOptions download')
        if download_td:
            download_link = download_td.find('a', class_='secondaryContent')['href']
            download_url = f'https://www.spigotmc.org/{download_link}'

            print(f"Found version {version}, download link: {download_url}")

            try:
                # Send the GET request to download the file
                response = requests.get(download_url, headers=headers, timeout=60)

                if response.status_code == 200:
                    content_disposition = response.headers.get('Content-Disposition')
                    if content_disposition:
                        filename = content_disposition.split('filename=')[1].strip('"')
                    else:
                        filename = f'CMI-{version}.jar'  # fall back to the version number

                    # Build the full save path
                    file_path = os.path.join(save_directory, filename)

                    # Write the file
                    with open(file_path, 'wb') as f:
                        f.write(response.content)

                    print(f"Version {version} downloaded, saved to '{file_path}'")
                else:
                    print(f"Version {version} download failed, HTTP status code: {response.status_code}")
            except Exception as e:
                print(f"Failed to download version {version}: {str(e)}")
        else:
            print(f"No download link found for version {version}")
    else:
        print("No version number found")

Multithreaded batch plugin download

Use multiple threads to download plugins in parallel and speed things up.
`Referer`: version-history page URL
`base_url`: version-history page URL
`Cookie`: your Cookie
`max_threads`: maximum number of concurrent download threads
`save_directory`: folder where downloads are saved
import os
import requests
from bs4 import BeautifulSoup
import threading

# Request headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36 Edg/123.0.0.0',
    'Referer': 'history page URL',
    'Cookie': '******'  # replace with your own Cookie
}

# Directory where downloads are saved
save_directory = r'folder path'

# Maximum number of threads downloading at once
max_threads = 5

# Create the folder if it does not exist
os.makedirs(save_directory, exist_ok=True)

def download_version(version, download_url):
    try:
        # Pre-build a save path so files that already exist can be skipped
        filename = f'DisplayShops_{version}.jar'
        file_path = os.path.join(save_directory, filename)

        if os.path.exists(file_path):
            print(f"Version {version} file '{filename}' already exists, skipping.")
            return

        # Send the GET request to download the file
        response = requests.get(download_url, headers=headers, timeout=60)

        if response.status_code == 200:
            content_disposition = response.headers.get('Content-Disposition')
            if content_disposition:
                # Prefer the server-provided filename and rebuild the save path;
                # otherwise the file would still be written under the pre-built name
                filename = content_disposition.split('filename=')[1].strip('"')
                file_path = os.path.join(save_directory, filename)

            # Write the file
            with open(file_path, 'wb') as f:
                f.write(response.content)

            print(f"Version {version} downloaded, saved to '{file_path}'")
        else:
            print(f"Version {version} download failed, HTTP status code: {response.status_code}")
    except Exception as e:
        print(f"Failed to download version {version}: {str(e)}")

# Fetch the version-history page
base_url = 'history page URL'
response = requests.get(base_url, headers=headers)

if response.status_code != 200:
    print(f"Could not fetch the version-history page, HTTP status code: {response.status_code}")
    exit()

html_content = response.text

# Parse the HTML with BeautifulSoup to locate the per-version download links
soup = BeautifulSoup(html_content, 'html.parser')

# Every <tr> row that holds a historical version
version_rows = soup.find_all('tr', class_='dataRow')

# Download the historical versions on multiple threads
threads = []
for row in version_rows:
    # The <td> element with the version number
    version_element = row.find('td', class_='version')
    if version_element:
        version = version_element.text.strip()

        # The <td> element with the download link
        download_td = row.find('td', class_='dataOptions download')
        if download_td:
            download_link = download_td.find('a', class_='secondaryContent')['href']
            download_url = f'https://www.spigotmc.org/{download_link}'

            print(f"Found version {version}, download link: {download_url}")

            # Start a download task on its own thread
            thread = threading.Thread(target=download_version, args=(version, download_url))
            threads.append(thread)
            thread.start()

            # Cap the number of threads running at once
            if len(threads) >= max_threads:
                for t in threads:
                    t.join()
                threads = []

# Wait for the remaining threads to finish
for t in threads:
    t.join()

print("All historical version files downloaded.")

Adding download progress and reading the 403 page title

import os
import requests
from bs4 import BeautifulSoup
import threading
from urllib.parse import urlparse
import time

# Request headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36 Edg/123.0.0.0',
    'Referer': 'https://spigotmc.ru/',
    'Cookie': '******'  # replace with your own Cookie
}

# Directory where downloads are saved
save_directory = r'S:/Users/26370/Desktop/Down/Mods'

# Maximum number of threads downloading at once
max_threads = 3

# Create the folder if it does not exist
os.makedirs(save_directory, exist_ok=True)

# Download statistics
total_files = 0
downloaded_files = 0
counter_lock = threading.Lock()  # guards the shared counter across threads

def download_file(download_url):
    global downloaded_files

    try:
        # Send the GET request to download the file
        response = requests.get(download_url, headers=headers, timeout=60)

        if response.status_code == 200:
            # Determine the filename
            content_disposition = response.headers.get('Content-Disposition')
            if content_disposition:
                filename = content_disposition.split('filename=')[1].strip('"')
            else:
                # No server-provided name: fall back to the resource slug in the URL
                # (the URL ends in .../<resource-slug>/download)
                filename = urlparse(download_url).path.rstrip('/').split('/')[-2] + '.jar'

            # Build the full save path
            file_path = os.path.join(save_directory, filename)

            # Skip files that already exist
            if os.path.exists(file_path):
                print(f"File '{filename}' already exists, skipping.")
                return

            # Write the file
            with open(file_path, 'wb') as f:
                f.write(response.content)

            with counter_lock:
                downloaded_files += 1
            print(f"File '{filename}' downloaded, saved to '{file_path}'")
        elif response.status_code == 403:
            # Print the page body or title so the reason for the block is visible
            soup = BeautifulSoup(response.text, 'html.parser')
            body_main = soup.find('div', class_='p-body-main')
            if body_main:
                print(body_main.text.strip())
            elif soup.title:
                print("Page title:", soup.title.text.strip())
            else:
                print("403 Forbidden (no page title)")
        else:
            print(f"Download failed, HTTP status code: {response.status_code}")
    except Exception as e:
        print(f"Download failed: {str(e)}")

# Read the links from a text file
input_file = 'spigotmc.ru/spigotmc.ru.txt'  # replace with the path to your link list

download_links = []
with open(input_file, 'r', encoding='utf-8') as f:
    for line in f:
        # Keep only resource links, adding the /download suffix
        if '/resources/' in line:
            download_url = line.strip() + '/download'
            download_links.append(download_url)
            total_files += 1

# Download the files on multiple threads
threads = []
for download_url in download_links:
    # Start a download task on its own thread
    thread = threading.Thread(target=download_file, args=(download_url,))
    threads.append(thread)
    thread.start()

    # Cap the number of threads running at once
    if len(threads) >= max_threads:
        for t in threads:
            t.join()
        threads = []
        # Report progress
        print(f"Files downloaded: {downloaded_files}/{total_files}")

        # Pause for 5 seconds between batches
        time.sleep(5)

# Wait for the remaining threads to finish
for t in threads:
    t.join()

print("All files downloaded.")