Preface
Fill in the site Cookies, set the save directory save_directory, the maximum number of threads max_threads, and the download-link file input_file, then run the script.
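For reference, input_file is expected to hold one resource page URL per line; the script only keeps lines containing '/resources/' and appends '/download' to each. A hypothetical example of the file's contents (placeholder URLs, not real resources):

https://mineleak.pro/resources/example-plugin.123
https://mineleak.pro/resources/another-plugin.456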
Code
import os
import threading
from urllib.parse import urlparse

import requests

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36 Edg/123.0.0.0',
    'Referer': 'https://mineleak.pro/',
    'Cookie': '******'
}

save_directory = r'S:\Users\26370\Desktop\Down'
max_threads = 3

os.makedirs(save_directory, exist_ok=True)

def download_file(download_url):
    try:
        response = requests.get(download_url, headers=headers, timeout=60)
        if response.status_code == 200:
            # Prefer the server-supplied filename from Content-Disposition
            content_disposition = response.headers.get('Content-Disposition')
            if content_disposition:
                filename = content_disposition.split('filename=')[1].strip().strip('"')
            else:
                # Fall back to the last segment of the URL path
                filename = os.path.basename(urlparse(download_url).path)
            file_path = os.path.join(save_directory, filename)
            if os.path.exists(file_path):
                print(f"File '{filename}' already exists; skipping download.")
                return
            with open(file_path, 'wb') as f:
                f.write(response.content)
            print(f"File '{filename}' downloaded and saved to '{file_path}'")
        else:
            print(f"Download failed, HTTP status code: {response.status_code}")
    except Exception as e:
        print(f"Download failed: {str(e)}")

input_file = 'download_links copy.txt'

# Keep only resource links, appending '/download' to form the direct download URL
download_links = []
with open(input_file, 'r', encoding='utf-8') as f:
    for line in f:
        if '/resources/' in line:
            download_url = line.strip() + '/download'
            download_links.append(download_url)

# Start downloads in batches: once max_threads are running,
# wait for the whole batch to finish before starting the next
threads = []
for download_url in download_links:
    thread = threading.Thread(target=download_file, args=(download_url,))
    threads.append(thread)
    thread.start()
    if len(threads) >= max_threads:
        for t in threads:
            t.join()
        threads = []

# Wait for any remaining threads from the last partial batch
for t in threads:
    t.join()

print("All files downloaded.")
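One design note: the batch-style joining above stalls all worker slots until the slowest download in each batch finishes. A minimal sketch of the same fan-out using the standard library's concurrent.futures.ThreadPoolExecutor, which keeps max_threads downloads in flight continuously (download_file, download_links, and max_threads are as defined in the script above):

from concurrent.futures import ThreadPoolExecutor

# The pool replaces the manual thread/join bookkeeping;
# leaving the `with` block waits for all submitted downloads.
with ThreadPoolExecutor(max_workers=max_threads) as executor:
    executor.map(download_file, download_links)

print("All files downloaded.")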