import os

import requests
from bs4 import BeautifulSoup


def open_test(url, header):
    # Fetch a page and return it as parsed HTML (the site uses GB2312 encoding).
    html = requests.get(url, headers=header)
    html.encoding = "gb2312"
    return BeautifulSoup(html.text, "html.parser")


def down_test(soup, url, header):
    # Download every paper on the current list page whose title contains the
    # requested edition. `version`, `u` and `path` are set in the __main__ block.
    test_list = soup.find("ul", class_="c1")
    for tr in test_list.find_all("tr"):
        if version in tr.text:
            href = tr.find("a").get("href")
            ur = href.split("/")[-1]
            name = tr.find("a").string      # paper title, used as the file name
            test_ur = url + "/" + ur        # detail page of the paper
            print("试卷网址:", test_ur)
            test = requests.get(test_ur, headers=header)
            test.encoding = "gb2312"
            test_soup = BeautifulSoup(test.text, "html.parser")
            down_list = test_soup.find("ul", class_="downurllist")
            test_u = down_list.find("a").get("href")
            test_down = requests.get(u + test_u, headers=header)
            with open(os.path.join(path, name + ".rar"), "wb") as f:
                f.write(test_down.content)


if __name__ == '__main__':
    print("数据来源:第一试卷网(https://www.shijuan1.com/) 仅支持个人研究和学习,商用请联系官方授权.\n")
    print("声明:本代码仅供学习研究使用,请勿用于商业用途,否则后果自负!")
    print("声明:本代码仅供学习研究使用,请勿用于商业用途,否则后果自负!")
    print("声明:本代码仅供学习研究使用,请勿用于商业用途,否则后果自负!\n")

    # Show which grades are available for each subject
    # (approximate ranges; the site's actual categories may differ).
    sb = ["语文试卷", "数学试卷", "英语试卷", "物理试卷", "化学试卷",
          "政治试卷", "历史试卷", "地理试卷", "生物试卷"]
    gd = ("一年级 二年级 三年级 四年级 五年级 六年级 七年级 八年级 九年级 "
          "中考试卷 高一 高二 高三 高考试卷").split()
    for i in sb:
        if i in ("语文试卷", "数学试卷", "英语试卷"):
            menu = gd                        # all grades
        elif i == "物理试卷":
            menu = gd[9:]                    # 中考试卷 onward
        elif i == "化学试卷":
            menu = gd[10:]                   # 高一 onward
        elif i in ("政治试卷", "历史试卷"):
            menu = gd[7:]                    # 八年级 onward
        else:                                # 地理试卷 / 生物试卷
            menu = gd[7:9] + gd[10:]         # 八年级, 九年级, then 高中
        print(f'{i}: {" ".join(menu)}')

    u = "https://www.shijuan1.com/"
    header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                      "AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/125.0.0.0 Safari/537.36"
    }
    subject = {
        "语文试卷": '/a/sjyw', "数学试卷": '/a/sjsx', "英语试卷": '/a/sjyy',
        "物理试卷": '/a/sjwl', "化学试卷": '/a/sjhx', "政治试卷": '/a/sjzz',
        "历史试卷": '/a/sjls', "地理试卷": '/a/sjdl', "生物试卷": '/a/sjsw',
    }
    grade = {
        "一年级": "1", "二年级": "2", "三年级": "3", "四年级": "4",
        "五年级": "5", "六年级": "6", "七年级": "7", "八年级": "8",
        "九年级": "9", "中考试卷": "zk", "高一": "g1", "高二": "g2",
        "高三": "g3", "高考试卷": "gk",
    }

    s = input("请输入科目名称:")
    g = input("请输入年级:")
    url = u + subject[s] + grade[g]          # list page for the chosen subject and grade
    version = input("请输入版本信息:")
    path = input("请输入要保存文件的路径:")

    # Open the first list page and read the total page count from the pager.
    soup = open_test(url, header)
    page = soup.find("ul", class_="pagelist")
    count = page.find("strong").string
    page_ul = page.find("a").get("href").rsplit("_", 1)[0]

    # Walk every list page and download the matching papers on each.
    for i in range(1, int(count) + 1):
        if i == 1:
            down_test(soup, url, header)
        else:
            page_url = url + "/" + page_ul + "_" + str(i) + ".html"
            soup = open_test(page_url, header)
            down_test(soup, url, header)
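# Example session, with illustrative input values (the accepted subjects and
# grades are the keys of the `subject` and `grade` dictionaries above; the
# edition string is simply matched against paper titles, and the save path
# can be any existing directory):
#
#   请输入科目名称:数学试卷
#   请输入年级:七年级
#   请输入版本信息:人教版
#   请输入要保存文件的路径:D:\shijuan
#
# Each matching paper is then fetched from its detail page and saved as
# <title>.rar under the given directory.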