## Preface

This article shows how to script downloads of SpigotMC plugins with Python and run the script from the command line.
## Prepare the Cookie in Advance

SpigotMC sits behind Cloudflare, so plain `requests` calls get blocked. Log in to spigotmc.org in a browser, open the developer tools Network tab, and copy the `cf_clearance` cookie together with the browser's exact `User-Agent` string; both are used in every script below.
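Before running the larger scripts, it can save time to verify that the cookie actually passes Cloudflare. A minimal sketch, assuming you substitute your own `User-Agent` and cookie value:

```python
import requests

# Hypothetical sanity check: a 200 means the cf_clearance cookie passes Cloudflare;
# a 403 usually means the cookie expired or the User-Agent does not match the browser.
headers = {
    'User-Agent': '<the exact User-Agent of the browser you copied the cookie from>',
    'Cookie': 'cf_clearance=******',
}
resp = requests.get('https://www.spigotmc.org/', headers=headers)
print(resp.status_code)
```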
## Install the Python Environment

```bash
pip install requests
pip install bs4
```
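An optional quick check that both packages installed cleanly:

```python
# Both imports succeeding means the environment is ready
import requests
import bs4

print(requests.__version__, bs4.__version__)
```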
## Downloading a Single Plugin

Fill in `Referer` with the resource's version-history page URL, `Cookie` with your `cf_clearance` value, and `url` with the download link:

```python
import requests

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36 Edg/123.0.0.0',
    'Referer': '<version-history page URL>',
    'Cookie': 'cf_clearance=******',
}

url = '<download URL>'
response = requests.get(url, headers=headers)

if response.status_code == 200:
    # Prefer the server-supplied filename from the Content-Disposition header
    content_disposition = response.headers.get('Content-Disposition')
    if content_disposition:
        filename = content_disposition.split('filename=')[1].strip('"')
    else:
        filename = 'downloaded_file.jar'
    with open(filename, 'wb') as f:
        f.write(response.content)
    print(f"File '{filename}' downloaded.")
else:
    print(f"Download failed, HTTP status code: {response.status_code}")
```
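The `split('filename=')` trick breaks on headers that quote or encode the filename differently (for example RFC 5987 `filename*=` values). If that happens, the standard library can parse the header instead; this is a sketch, and the helper name is mine:

```python
from email.message import Message

def filename_from_disposition(header_value, default='downloaded_file.jar'):
    """Parse Content-Disposition via the stdlib instead of string splitting."""
    if not header_value:
        return default
    msg = Message()
    msg['Content-Disposition'] = header_value
    return msg.get_filename() or default

# Usage: filename = filename_from_disposition(response.headers.get('Content-Disposition'))
```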
## Batch Plugin Download

Set `save_directory` to the folder the files should be saved in, and `base_url` to the version-history page URL.
```python
import os
import requests
from bs4 import BeautifulSoup

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36 Edg/123.0.0.0',
    'Referer': '<version-history page URL>',
    'Cookie': '******',
}

save_directory = r'<folder path>'
base_url = '<version-history page URL>'
os.makedirs(save_directory, exist_ok=True)  # create the target folder if it is missing

response = requests.get(base_url, headers=headers)
if response.status_code != 200:
    print(f"Could not fetch the version-history page, HTTP status code: {response.status_code}")
    exit()

soup = BeautifulSoup(response.text, 'html.parser')

# Each historical version is one <tr class="dataRow"> in the history table
version_rows = soup.find_all('tr', class_='dataRow')

for row in version_rows:
    version_element = row.find('td', class_='version')
    if not version_element:
        print("Version number not found")
        continue
    version = version_element.text.strip()

    download_td = row.find('td', class_='dataOptions download')
    if not download_td:
        print(f"No download link found for version {version}")
        continue

    download_link = download_td.find('a', class_='secondaryContent')['href']
    download_url = f'https://www.spigotmc.org/{download_link}'
    print(f"Parsed version {version}, download URL: {download_url}")

    try:
        response = requests.get(download_url, headers=headers, timeout=60)
        if response.status_code == 200:
            content_disposition = response.headers.get('Content-Disposition')
            if content_disposition:
                filename = content_disposition.split('filename=')[1].strip('"')
            else:
                # Fallback name; CMI is the plugin used as the example here
                filename = f'CMI-{version}.jar'
            file_path = os.path.join(save_directory, filename)
            with open(file_path, 'wb') as f:
                f.write(response.content)
            print(f"Version {version} downloaded, saved to '{file_path}'")
        else:
            print(f"Version {version} download failed, HTTP status code: {response.status_code}")
    except Exception as e:
        print(f"Failed to download version {version}: {e}")
```
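Cloudflare-fronted sites may throttle rapid sequential requests. A small retry wrapper can be dropped in wherever the loop calls `requests.get`; this is a sketch, and the function name and retry counts are my own choices:

```python
import time
import requests

def get_with_retries(url, headers, retries=3, backoff=5, timeout=60):
    """Retry a GET a few times with a fixed pause between attempts."""
    for attempt in range(1, retries + 1):
        try:
            resp = requests.get(url, headers=headers, timeout=timeout)
            if resp.status_code == 200:
                return resp
            print(f"Attempt {attempt}: HTTP {resp.status_code} for {url}")
        except requests.RequestException as e:
            print(f"Attempt {attempt} failed: {e}")
        time.sleep(backoff)
    return None  # caller treats None as a failed download
```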
## Multi-threaded Batch Plugin Download

Download plugins on several threads at once to speed things up. Fill in the following before running:
- `Referer`: the version-history page URL
- `base_url`: the version-history page URL
- `Cookie`: your cookie
- `max_threads`: the maximum number of concurrent download threads
- `save_directory`: the folder to save the files in
```python
import os
import threading

import requests
from bs4 import BeautifulSoup

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36 Edg/123.0.0.0',
    'Referer': '<version-history page URL>',
    'Cookie': '******',
}

save_directory = r'<folder path>'
max_threads = 5
os.makedirs(save_directory, exist_ok=True)

def download_version(version, download_url):
    try:
        # Guess the filename first so already-downloaded versions can be skipped
        filename = f'DisplayShops_{version}.jar'
        file_path = os.path.join(save_directory, filename)
        if os.path.exists(file_path):
            print(f"File '{filename}' for version {version} already exists, skipping.")
            return
        response = requests.get(download_url, headers=headers, timeout=60)
        if response.status_code == 200:
            content_disposition = response.headers.get('Content-Disposition')
            if content_disposition:
                filename = content_disposition.split('filename=')[1].strip('"')
                file_path = os.path.join(save_directory, filename)  # honour the server-supplied name
            with open(file_path, 'wb') as f:
                f.write(response.content)
            print(f"Version {version} downloaded, saved to '{file_path}'")
        else:
            print(f"Version {version} download failed, HTTP status code: {response.status_code}")
    except Exception as e:
        print(f"Failed to download version {version}: {e}")

base_url = '<version-history page URL>'
response = requests.get(base_url, headers=headers)
if response.status_code != 200:
    print(f"Could not fetch the version-history page, HTTP status code: {response.status_code}")
    exit()

soup = BeautifulSoup(response.text, 'html.parser')
version_rows = soup.find_all('tr', class_='dataRow')

threads = []
for row in version_rows:
    version_element = row.find('td', class_='version')
    if not version_element:
        continue
    version = version_element.text.strip()
    download_td = row.find('td', class_='dataOptions download')
    if not download_td:
        continue
    download_link = download_td.find('a', class_='secondaryContent')['href']
    download_url = f'https://www.spigotmc.org/{download_link}'
    print(f"Parsed version {version}, download URL: {download_url}")

    thread = threading.Thread(target=download_version, args=(version, download_url))
    threads.append(thread)
    thread.start()

    # Run at most max_threads downloads at a time
    if len(threads) >= max_threads:
        for t in threads:
            t.join()
        threads = []

for t in threads:
    t.join()

print("All historical version files downloaded.")
```
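Manually batching and joining threads works, but the standard library's `ThreadPoolExecutor` expresses the same "at most N at a time" policy with less bookkeeping. A sketch, assuming the `(version, URL)` pairs are first collected into a list instead of being downloaded inside the parsing loop:

```python
from concurrent.futures import ThreadPoolExecutor

# version_links is assumed to hold (version, download_url) tuples
# collected from the same tr.dataRow parsing loop as above.
version_links = []

with ThreadPoolExecutor(max_workers=max_threads) as pool:
    for version, download_url in version_links:
        pool.submit(download_version, version, download_url)
# Leaving the with-block waits for all submitted downloads to finish.
```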
## Adding Download Progress and Reading the 403 Page Title

This variant reads resource links from a text file, keeps a downloaded/total counter, and when the server answers 403 it parses the returned HTML to print the block page's message or title.
```python
import os
import threading
import time

import requests
from bs4 import BeautifulSoup

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36 Edg/123.0.0.0',
    'Referer': 'https://spigotmc.ru/',
    'Cookie': 'xf_csrf=******; xf_user=******; spigotmc.ru_b2e6e1b0edda8bfcbc1b1fba0889d325_evc=******; xf_session=******; cf_chl_3=******; cf_clearance=******',
}

save_directory = r'S:/Users/26370/Desktop/Down/Mods'
max_threads = 3
os.makedirs(save_directory, exist_ok=True)

total_files = 0
downloaded_files = 0
counter_lock = threading.Lock()  # the counter is shared across download threads

def download_file(download_url):
    global downloaded_files
    try:
        response = requests.get(download_url, headers=headers, timeout=60)
        if response.status_code == 200:
            content_disposition = response.headers.get('Content-Disposition')
            if content_disposition:
                filename = content_disposition.split('filename=')[1].strip('"')
            else:
                # Fall back to the resource slug (the path segment before '/download')
                filename = download_url.rstrip('/').split('/')[-2] + '.jar'
            file_path = os.path.join(save_directory, filename)
            if os.path.exists(file_path):
                print(f"File '{filename}' already exists, skipping.")
                return
            with open(file_path, 'wb') as f:
                f.write(response.content)
            with counter_lock:
                downloaded_files += 1
            print(f"File '{filename}' downloaded, saved to '{file_path}'")
        elif response.status_code == 403:
            # On 403, show the block page's message, or at least its title
            soup = BeautifulSoup(response.text, 'html.parser')
            body_main = soup.find('div', class_='p-body-main')
            if body_main:
                print(body_main.text.strip())
            else:
                print("Page title:", soup.title.text.strip())
        else:
            print(f"Download failed, HTTP status code: {response.status_code}")
    except Exception as e:
        print(f"Download failed: {e}")

# One resource link per line; '/download' is appended to get the file itself
input_file = 'spigotmc.ru/spigotmc.ru.txt'
download_links = []
with open(input_file, 'r', encoding='utf-8') as f:
    for line in f:
        if '/resources/' in line:
            download_links.append(line.strip() + '/download')
            total_files += 1

threads = []
for download_url in download_links:
    thread = threading.Thread(target=download_file, args=(download_url,))
    threads.append(thread)
    thread.start()
    if len(threads) >= max_threads:
        for t in threads:
            t.join()
        threads = []
        print(f"Files downloaded: {downloaded_files}/{total_files}")
        time.sleep(5)  # brief pause between batches to go easy on the server

for t in threads:
    t.join()

print("All files downloaded.")
```
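The counter above only tracks whole files. For per-file progress the response can be streamed with `stream=True` and `iter_content`; a sketch (the helper name is mine) that could replace the `requests.get` plus `f.write` pair inside `download_file`:

```python
import requests

def download_with_progress(url, file_path, headers, chunk_size=8192):
    """Stream the download and print byte-level progress (sketch)."""
    with requests.get(url, headers=headers, stream=True, timeout=60) as resp:
        resp.raise_for_status()
        total = int(resp.headers.get('Content-Length', 0))
        done = 0
        with open(file_path, 'wb') as f:
            for chunk in resp.iter_content(chunk_size=chunk_size):
                f.write(chunk)
                done += len(chunk)
                if total:  # only show a percentage when the server sends a length
                    print(f"\r{file_path}: {done * 100 // total}%", end='')
    print()  # newline after the progress line
```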